Copyright © 2026 Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{196467,
  author   = {Dhage, Sanket and Ghorpade, Sahil and Jha, Satyam and Gupta, Vikas},
  title    = {{SAYLINK}: A Voice-Controlled Social Media Web Application Using Speech Recognition, {NLU} and {TTS} for Accessible Interaction},
  journal  = {International Journal of Innovative Research in Technology},
  year     = {2026},
  month    = apr,
  volume   = {12},
  number   = {11},
  pages    = {3854--3862},
  issn     = {2349-6002},
  url      = {https://ijirt.org/article?manuscript=196467},
  abstract = {Voice-driven interaction is emerging as a significant advancement in human computer communication, enabling hands-free operation and improving accessibility across digital platforms [1], [2]. Traditional social media interfaces rely on touch and typing, which may limit usability for multitasking users and individuals with motor impairments. This paper presents SAYLINK, a voice-controlled social media web application that integrates Speech-to-Text (STT), Natural Language Understanding (NLU), and Text-to-Speech (TTS) to execute actions such as liking posts, commenting, following users, searching content, and navigating between pages using spoken commands. The system architecture combines a React frontend for user interaction, client-side speech processing and command interpretation with a Node.js backend for application logic, and MongoDB for persistent data storage. The approach demonstrates a practical implementation of voice-driven interaction within a social media environment, aligning with voice-driven web interaction and command-based automation systems [3], [5]. Experimental evaluation demonstrates an average STT accuracy of 92.3\% in quiet settings and 87.0\% under moderate background noise, with an average response latency of 1.45 seconds, indicating efficient and reliable hands-free performance. SAYLINK highlights the potential of voice-based social platforms to enhance accessibility, convenience, and inclusivity while offering opportunities for future expansion to multilingual and adaptive NLU models [6].},
  keywords = {Voice Recognition, Web Speech API, React.js, Node.js, Express.js, MongoDB, Socket.IO, RESTful APIs, Event-Driven Architecture, Hands-Free Interaction, Social Media Interface, Accessibility},
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024
Submit inquiry