Copyright © 2026. Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{192598,
  author   = {Chaudhary, Dixitkumar M. and Sandhi, Idrish I.},
  title    = {A Comprehensive Review of {AI} and Deep Learning Techniques for Detecting Cybersecurity Threats on {Instagram}},
  journal  = {International Journal of Innovative Research in Technology},
  year     = {2026},
  month    = feb,
  volume   = {12},
  number   = {9},
  pages    = {3027--3033},
  issn     = {2349-6002},
  url      = {https://ijirt.org/article?manuscript=192598},
  abstract = {Instagram’s focus on visuals—and the sheer number of people using it—has turned the platform into a magnet for phishing, impersonation, spam, fraud, and cyberbullying. Old-school keyword or rule-based detection just can’t keep up, especially with the way people mix languages, use emojis, and hide bad behavior in images or weird text. This review pulls together research from 2016 to 2025 on how artificial intelligence and deep learning are changing the game when it comes to spotting cyber threats on Instagram. We dig into how threats show up on the platform, what makes Instagram unique, and how researchers build and label datasets. There’s a lot of ground to cover—models that look at text, images, or both at once (think Transformers like BERT and mBERT, or neural nets like ResNet), tools that track relationships and timing, and ways to actually measure what works. We also look at explainable AI methods like SHAP, LIME, and Grad-CAM—because honestly, it helps to know why a model flagged something as dangerous. We put different methods side by side, share how they perform (F1, ROC-AUC, PR-AUC—you get the idea), and call out the big headaches that keep popping up: stuff like code-mixing, sarcasm, hidden abuse, sneaky URLs, adversarial attacks, privacy headaches, and biased datasets. Wrapping up, we lay out where Instagram-focused research should head next: stronger vision-language models, better network and time-based features, learning that happens right on your device or in a privacy-safe way, and smarter human moderation backed by dashboards you can actually understand.},
  keywords = {Instagram, cybersecurity, deep learning, natural language processing, multimodal fusion, explainable AI, phishing, cyberbullying, impersonation, spam},
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024.
Submit inquiry