Copyright © 2026. Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{190882,
  author        = {Akkala Teja Swaroop and Kotha Likhitha},
  internal-note = {NOTE(review): author name order (surname-first vs given-first) unverified from source; confirm before converting to "Last, First" form},
  title         = {Advancing Transparency in Machine Learning: A Technical Review of Explainable {AI}},
  journal       = {International Journal of Innovative Research in Technology},
  year          = {2026},
  month         = feb,
  volume        = {12},
  number        = {8},
  pages         = {4024--4032},
  issn          = {2349-6002},
  url           = {https://ijirt.org/article?manuscript=190882},
  abstract      = {The proliferation of complex machine learning models has accentuated the need for explainable AI (XAI) -- methods that render black-box models more transparent and understandable. In high-stakes applications (e.g., healthcare diagnosis, financial decisions, autonomous systems), stakeholders demand insights into how models arrive at predictions to ensure trust, fairness, and accountability[1][2]. This paper reviews intrinsic (ante-hoc) and post-hoc interpretability techniques, including feature-attribution methods (e.g., LIME, SHAP) and saliency-based methods (e.g., Grad-CAM), and contrasts model-specific vs. model-agnostic approaches. We discuss evaluation metrics for XAI --- notably fidelity (faithfulness), robustness, and human interpretability --- which assess how well explanations reflect the model and how understandable they are to users[3][4]. Real-world applications of XAI in domains such as healthcare, finance, and autonomous vehicles are surveyed, highlighting how explanations support decision-makers and compliance. We address regulatory and ethical implications (e.g. GDPR and forthcoming AI laws) that drive XAI adoption. Finally, we outline recent advances --- including the use of large language models for natural-language explanations and mechanistic interpretability --- and identify open challenges (e.g., standardizing metrics, balancing accuracy vs. explainability) that must be overcome to realize reliable and user-centered XAI[5][6].},
  keywords      = {Explainable AI (XAI); model interpretability; LIME; SHAP; Grad-CAM; evaluation metrics; fidelity; robustness; transparency; trust; ethics; healthcare; finance; autonomous systems},
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024
Submit inquiry