Copyright © 2026. The authors retain the copyright of this article. This article is an open-access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{175497,
  author   = {Davood, Shaik and Yamuna, R.},
  title    = {{CNN2D} and {SHAP} for Enhanced Explainability in Drug Safety Analysis},
  journal  = {International Journal of Innovative Research in Technology},
  year     = {2025},
  volume   = {11},
  number   = {11},
  pages    = {3168--3177},
  issn     = {2349-6002},
  url      = {https://ijirt.org/article?manuscript=175497},
  abstract = {Machine and deep learning models are widely used for predicting future values or classifying objects. However, some organizations, particularly in the medical field, are hesitant to adopt these models for predicting drug side effects or effectiveness. To address this, Explainable AI (XAI) has been introduced, which provides insights into model predictions by identifying the most influential features. This study reviews various machine learning (ML) and deep learning (DL) algorithms—such as XGBoost, GraphCNN, and MLP—using the 'Drug Classification' dataset from Kaggle to predict the best drug for patients based on health conditions. The use of XAI tools like SHAP helps explain these models' predictions, enhancing transparency and fostering trust in AI applications within healthcare.},
  keywords = {CNN2D, SHAP, explainable artificial intelligence (XAI)},
  month    = apr,
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024.
Submit inquiry