Copyright © 2026. Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{188936,
author = {Mohammed Sule},
title = {Small Language Models: Efficient Architectures, Compression Techniques, and Edge Deployment Strategies},
journal = {International Journal of Innovative Research in Technology},
year = {2025},
volume = {12},
number = {7},
pages = {4201-4210},
issn = {2349-6002},
url = {https://ijirt.org/article?manuscript=188936},
abstract = {Small Language Models (SLMs) represent a compelling paradigm in AI that challenges the assumption that bigger models trained on more data are always better. This survey reviews SLMs: transformer-based language models with roughly 100 million to 5 billion parameters, designed for efficient deployment on resource-constrained devices such as smartphones, tablets, and edge hardware. Architectural innovations distinguish SLMs from large models and allow them to perform comparably to models 10–100× their size. We examine the sources of these efficiency gains, including the “textbook quality” data curation behind Microsoft’s Phi family, weight sharing, and efficient attention mechanisms. We analyze the model compression techniques that make SLMs small yet capable: quantization, pruning, and knowledge distillation. We cover on-device deployment frameworks such as llama.cpp, MLC-LLM, and ExecuTorch, along with hardware optimizations for mobile CPUs, GPUs, and NPUs. Finally, we survey benchmarking results showing improvements of 10–14% between 2022 and 2024 on commonsense reasoning, problem solving, and mathematics; for example, Phi-3-mini outperformed GPT-3.5 despite being roughly 45× smaller at 3.8B parameters. This survey compiles findings from NeurIPS, ICML, ICLR, and industry technical reports into a comprehensive overview, offering practical guidance for practitioners and open directions for researchers.},
keywords = {Small Language Models, model compression, knowledge distillation, quantization, pruning, edge deployment, on-device AI, Phi, TinyLlama, MobileLLM, efficient inference},
month = {December},
}
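For readers unfamiliar with the compression techniques the abstract names, the sketch below illustrates symmetric per-tensor int8 post-training quantization, the simplest of the three methods the survey covers. It is an illustrative example only, not code from the paper; the function names and the NumPy-based setup are assumptions made here for clarity.

```python
import numpy as np

def quantize_int8_symmetric(weights: np.ndarray):
    """Symmetric per-tensor int8 post-training quantization.

    Maps float weights to int8 using a single scale factor chosen so
    that the largest-magnitude weight maps to +/-127.
    """
    scale = np.abs(weights).max() / 127.0
    scale = max(scale, 1e-8)  # guard against an all-zero tensor
    q = np.clip(np.round(weights / scale), -127, 127).astype(np.int8)
    return q, scale

def dequantize(q: np.ndarray, scale: float) -> np.ndarray:
    """Recover approximate float weights for inference."""
    return q.astype(np.float32) * scale

if __name__ == "__main__":
    w = np.random.randn(4, 8).astype(np.float32)
    q, s = quantize_int8_symmetric(w)
    w_hat = dequantize(q, s)
    print("max abs reconstruction error:", np.abs(w - w_hat).max())
    print("memory: fp32 =", w.nbytes, "bytes, int8 =", q.nbytes, "bytes")
```

The 4× memory reduction shown here (fp32 to int8) is the basic mechanism by which quantized SLMs fit within the RAM budgets of phones and edge devices; production toolchains such as llama.cpp apply more elaborate grouped and mixed-precision schemes on the same principle.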