Copyright © 2026. The authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{191660,
author = {Ananya Singh},
title = {Slopsquatting and package-hallucination in LLMs},
journal = {International Journal of Innovative Research in Technology},
year = {2026},
volume = {12},
pages = {1-12},
issn = {2349-6002},
url = {https://ijirt.org/article?manuscript=191660},
abstract = {The reliance of popular languages such as C++, Python, Java, and JavaScript on centralized package repositories, combined with the growing use of large language models (LLMs) for code generation, has created a closely linked pair of threats: package hallucination and the slopsquatting attacks it can enable.
Package hallucination occurs when an LLM imports or suggests packages or dependencies that do not exist in the language ecosystem. This is risky because developers may assume such packages are real, leading to debugging delays and potential vulnerabilities in enterprise environments.
This research focuses on cross-language hallucinations, dependency-version hallucinations, and IDE-integration hallucinations in languages such as C++ and Java. Slopsquatting, like typosquatting, relies on publishing malicious packages under plausible lookalike names (e.g., reqeusts vs. requests); when attackers register names that LLMs hallucinate, hallucination directly enables slopsquatting attacks.
I conducted experiments on multiple LLMs, including CodeLlama, GPT-3, GPT-5, DeepSeek, and Qwen-3, using prompts designed to trigger hallucinations. Each generated code sample was analyzed to identify hallucinated, partially hallucinated, and valid packages. The findings show that hallucinations are recurring patterns, not isolated mistakes, and that the risk of slopsquatting is real across different languages and models.
Finally, a dataset of C++ and Java prompts with LLM-generated code samples is released to facilitate reproducibility and further research. This study highlights the importance of careful dependency validation, prompt design, and developer awareness to mitigate risks associated with hallucinated packages and slopsquatting.},
keywords = {Attack, Cross-language, Malicious, Large Language Models (LLMs), Package hallucination, Slopsquatting, Hallucination},
}
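
The abstract's closing call for "careful dependency validation" can be made concrete. The following is a minimal sketch, not the paper's actual pipeline: it extracts top-level imports from LLM-generated Python code and checks each name against PyPI's public JSON API, which answers HTTP 404 for packages that do not exist. The hallucinated name fastjsonvalidator2 is a hypothetical example, and import names can differ from distribution names (e.g., sklearn vs. scikit-learn), so a production validator would need a mapping layer.

import ast
import sys
import urllib.error
import urllib.request

def extract_top_level_imports(source: str) -> set[str]:
    """Collect top-level module names imported by a Python snippet."""
    names = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Import):
            names.update(alias.name.split(".")[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module:
            names.add(node.module.split(".")[0])
    # Standard-library modules are never fetched from a registry (Python 3.10+).
    return names - set(sys.stdlib_module_names)

def exists_on_pypi(package: str) -> bool:
    """Return True if PyPI's JSON API knows the package, False on HTTP 404."""
    url = f"https://pypi.org/pypi/{package}/json"
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            return resp.status == 200
    except urllib.error.HTTPError as err:
        if err.code == 404:
            return False
        raise

# An LLM-generated snippet with one real and one hypothetical hallucinated import.
llm_output = "import requests\nimport fastjsonvalidator2\n"
for pkg in sorted(extract_top_level_imports(llm_output)):
    verdict = "valid" if exists_on_pypi(pkg) else "hallucinated -- slopsquatting target"
    print(f"{pkg}: {verdict}")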
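
The reqeusts-vs-requests example in the abstract also suggests a cheap guard against lookalike names on the typo side of the attack surface. This sketch uses Python's standard difflib to flag dependency names suspiciously close to well-known packages; the watchlist of popular packages is an illustrative assumption, not data from the study.

import difflib

# Illustrative watchlist; a real tool would draw on registry download statistics.
POPULAR_PACKAGES = ["requests", "numpy", "pandas", "urllib3", "setuptools"]

def lookalike_warnings(dependency: str, cutoff: float = 0.8) -> list[str]:
    """Return popular package names that `dependency` resembles without matching exactly."""
    close = difflib.get_close_matches(dependency, POPULAR_PACKAGES, n=3, cutoff=cutoff)
    return [name for name in close if name != dependency]

# "reqeusts" (the abstract's example) scores 0.875 against "requests" and is flagged;
# an exact match like "requests" itself is filtered out and passes cleanly.
for name in ["reqeusts", "requests", "nunpy"]:
    hits = lookalike_warnings(name)
    if hits:
        print(f"WARNING: {name} resembles {hits} -- possible slopsquatting/typosquatting")
    else:
        print(f"{name}: no lookalike concern")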