Copyright © 2026. The authors retain the copyright of this article. This article is an open-access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{158770,
  author        = {LP, Nikhitha and Pandya, Kunal and DP, Satwik and MP, Sajala and M, Indushree},
  title         = {Optimizing Neural Network for Deployment on Edge Devices},
  journal       = {International Journal of Innovative Research in Technology},
  volume        = {9},
  number        = {10},
  pages         = {706--710},
  issn          = {2349-6002},
  url           = {https://ijirt.org/article?manuscript=158770},
  abstract      = {Networks like convolution neural networks and their derivatives have contributed to the area of computer vision's explosive growth in recent years. Such a network however cannot be deployed on the edge device because of the high computational cost and memory requirements for model storage. Edge computing can address problems with latency, connectivity, cost, and privacy, but edge devices still face difficulties due to the deep learning model's high resource requirements. A big network-sized CNN model with more floating-point operations is required, particularly for deep learning-based applications. In order to address those issues, this study provides a strategy for deploying deep learning models to edge devices. The neural network has been optimized utilizing memory and computation-saving methods like pruning, weight clustering and quantization.},
  keywords      = {edge computing, convolutional neural networks, model compression, pruning, weight clustering, quantization},
  internal-note = {NOTE(review): year and month absent from scraped source (were empty fields); vol. 9, no. 10 -- confirm publication date against the publisher page before citing},
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024.
Submit inquiry