Copyright © 2025. The authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{188944,
author = {Vijay Babu Koppadi},
title = {PRIVACY-PRESERVING MACHINE LEARNING, INCLUDING FEDERATED LEARNING FOR CYBERSECURITY SOLUTIONS TO PROTECT SENSITIVE USER DATA WHILE TRAINING MODELS},
journal = {International Journal of Innovative Research in Technology},
year = {2025},
volume = {12},
number = {7},
pages = {3822-3829},
issn = {2349-6002},
url = {https://ijirt.org/article?manuscript=188944},
abstract = {Privacy-preserving machine learning, particularly through federated learning, revolutionises cybersecurity by enabling collaborative model training across decentralised devices without exchanging raw, sensitive user data, thereby safeguarding privacy in threat detection and intrusion prevention systems. In traditional centralised approaches, aggregating logs from networks or IoT devices exposes organisations to breaches and regulatory violations under frameworks like GDPR. Federated learning mitigates this by performing local training on edge nodes such as smartphones or enterprise servers, where models update using algorithms like Federated Averaging (FedAvg) and share only aggregated gradients or weights with a central coordinator. This decentralised paradigm supports cybersecurity applications including malware classification, anomaly detection in power grids, and collaborative cyber threat intelligence (CTI), allowing banks, hospitals, and IoT networks to pool insights on phishing or ransomware without revealing proprietary information.
To bolster defences against inference attacks such as gradient inversion or model poisoning, techniques such as differential privacy inject calibrated noise (for example, Gaussian or Laplace) into updates, providing mathematical privacy guarantees while trading off some accuracy. Homomorphic encryption enables computation on ciphertexts without decryption; secure multi-party computation (SMPC) facilitates joint calculations among parties; and secure aggregation masks individual contributions during server-side merging. Despite challenges like non-IID data distribution across clients, communication overhead, and system heterogeneity, federated learning reduces bandwidth needs and enhances real-world model robustness for dynamic threats in smart cities or finance. In cybersecurity, it powers privacy-first intrusion detection systems (IDS) that analyse decentralised traffic patterns, quantum-resistant adaptations for future threats, and adaptive firewalls optimising policies via reinforcement learning without data centralisation. Ongoing research addresses limitations through vertical or horizontal FL variants, bitwise quantisation for efficiency, and local differential privacy to counter membership inference risks. This framework complies with global standards, fosters trust in AI-driven security, and scales to edge computing environments where data locality is paramount. Ultimately, privacy-preserving federated learning balances utility and protection, enabling robust cybersecurity solutions amid escalating data sensitivity.},
keywords = {Privacy-preserving, Federated, Intelligence, Coordinator, Aggregated, Cybersecurity, Classification, Anomaly, Detection, Aggregation, Mathematical privacy, Homomorphic, Intrusion, Quantum-resistant},
month = {December},
}
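The abstract describes Federated Averaging (FedAvg), in which each client trains locally and shares only model weights, which a central coordinator combines. The following is a minimal sketch of that server-side aggregation step, not code from the cited paper; it assumes NumPy, and the function and parameter names (fedavg, client_weights, client_sizes) are illustrative.

import numpy as np

def fedavg(client_weights, client_sizes):
    """FedAvg server step: weighted average of client model weights.

    client_weights: list (one entry per client) of lists of np.ndarray layers.
    client_sizes:   number of local training samples per client.
    """
    total = float(sum(client_sizes))
    num_layers = len(client_weights[0])
    averaged = []
    for layer in range(num_layers):
        # Weight each client's layer parameters by its share of the total data,
        # so larger local datasets contribute proportionally more to the global model.
        layer_avg = sum(
            (n / total) * w[layer] for w, n in zip(client_weights, client_sizes)
        )
        averaged.append(layer_avg)
    return averaged

In a full training loop, the coordinator would broadcast the averaged weights back to clients for the next local training round; only these aggregated parameters, never raw logs or traffic data, leave the edge nodes.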
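The abstract also notes that differential privacy injects calibrated Gaussian or Laplace noise into shared updates to resist inference attacks such as gradient inversion. Below is a hedged sketch of one common recipe (clip the update's L2 norm, then add Gaussian noise); the parameters clip_norm and noise_multiplier are assumed, illustrative values rather than recommendations from the cited work.

import numpy as np

def privatize_update(update, clip_norm=1.0, noise_multiplier=1.1, rng=None):
    """Clip a client's update to bound its sensitivity, then add Gaussian noise.

    update: np.ndarray of gradients or weight deltas from local training.
    """
    rng = rng or np.random.default_rng()
    # Clip the L2 norm so that any single client's influence on the
    # aggregated model is bounded by clip_norm.
    norm = np.linalg.norm(update)
    clipped = update * min(1.0, clip_norm / (norm + 1e-12))
    # Calibrated Gaussian noise: the standard deviation scales with the
    # clipping bound, trading some accuracy for a privacy guarantee.
    noise = rng.normal(0.0, noise_multiplier * clip_norm, size=update.shape)
    return clipped + noise

Each client would apply this to its update before transmission; combined with secure aggregation on the coordinator, individual contributions remain masked during server-side merging.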