Copyright © 2025 Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{187941,
author = {Manan Sojatia},
title = {A Unified Deep Learning Framework for CT-Based Lung Cancer and Histopathology-Based Breast Cancer Classification},
journal = {International Journal of Innovative Research in Technology},
year = {2025},
volume = {12},
number = {7},
pages = {108-123},
issn = {2349-6002},
url = {https://ijirt.org/article?manuscript=187941},
abstract = {Diagnosing cancer from medical imaging remains one of the most complex tasks in clinical practice, particularly for lung cancer and breast cancer, which together accounted for more than 4.8 million new cases in 2022. Lung cancer continues to have the highest mortality rate because it is often detected late, while breast cancer is the most frequently diagnosed cancer in women and demands accurate, timely diagnosis. Established diagnostic workflows rely heavily on the judgment of radiologists and pathologists assessing CT scans and histopathology slides, a process that is time-consuming, subjective, and often unavailable in resource-poor settings.
Deep learning methods built on architectures such as VGG19, ResNet, DenseNet, Inception-based networks, and EfficientNet have achieved strong performance, matching or exceeding state-of-the-art results reported in the literature while offering powerful feature extraction. However, many previous studies suffer from limited dataset diversity, overfitting due to small sample sizes, insufficient cross-model comparisons, and a lack of standardized preprocessing pipelines. Moreover, much of the existing research considers only a single model or dataset, limiting how meaningful the reported results are for real-world deployment.
To address these gaps, this study presents a unified evaluation framework for automated cancer detection based on eight state-of-the-art ImageNet-pretrained CNN architectures: VGG19, ResNet152V2, DenseNet201, InceptionV3, InceptionResNetV2, Xception, EfficientNetB1, and MobileNetV2. The same framework was applied to two datasets, a Lung Cancer CT Scan dataset and the BreaKHis breast cancer histopathology dataset, with consistent preprocessing, image augmentation, a single classification head, and a two-stage training process for every architecture: frozen-backbone training followed by fine-tuning of the top 35% of backbone layers. Performance was measured with Accuracy, Precision, Recall, F1-score, ROC-AUC, and PR-AUC. The proposed framework demonstrated high discriminative performance in both domains. DenseNet201 achieved the highest accuracy (above 99.0%) and ROC-AUC (above 0.99) for lung cancer classification, exceeding several results in the current literature. On breast cancer histopathology images, VGG19 was the top-performing model with 90% accuracy, high precision, and a ROC-AUC of 0.96, consistent with previous research emphasizing the utility of deeper CNN backbones for texture-rich medical images. The results show that a standardized multi-model evaluation strategy enhances reliability, limits dataset bias, and clarifies model selection for practical use.},
keywords = {},
month = {November},
}
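The sketch below illustrates one way the two-stage transfer-learning procedure described in the abstract could be implemented in TensorFlow/Keras: a pretrained backbone with a single classification head, trained first with the backbone frozen and then with the top 35% of backbone layers unfrozen. The image size, class count, learning rates, dropout rate, and dataset placeholders are assumptions for illustration, not values taken from the paper.

```python
# Minimal sketch of the two-stage training scheme described in the abstract.
# Hyperparameters and dataset handles are illustrative assumptions.
import tensorflow as tf

IMG_SIZE = (224, 224)      # assumed input resolution
NUM_CLASSES = 3            # e.g. lung CT classes; adjust per dataset

def build_model(backbone_fn=tf.keras.applications.DenseNet201):
    backbone = backbone_fn(weights="imagenet", include_top=False,
                           input_shape=IMG_SIZE + (3,))
    backbone.trainable = False  # stage 1: frozen backbone
    inputs = tf.keras.Input(shape=IMG_SIZE + (3,))
    x = backbone(inputs, training=False)  # keep BatchNorm in inference mode
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(0.3)(x)   # assumed regularization
    outputs = tf.keras.layers.Dense(NUM_CLASSES, activation="softmax")(x)
    return tf.keras.Model(inputs, outputs), backbone

model, backbone = build_model()

# Stage 1: train only the classification head.
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss="categorical_crossentropy",
              metrics=["accuracy", tf.keras.metrics.AUC(name="roc_auc")])
# model.fit(train_ds, validation_data=val_ds, epochs=10)  # train_ds/val_ds: your tf.data pipelines

# Stage 2: unfreeze the top 35% of backbone layers and fine-tune at a lower LR.
backbone.trainable = True
cutoff = int(len(backbone.layers) * 0.65)
for layer in backbone.layers[:cutoff]:
    layer.trainable = False
model.compile(optimizer=tf.keras.optimizers.Adam(1e-5),
              loss="categorical_crossentropy",
              metrics=["accuracy", tf.keras.metrics.AUC(name="roc_auc")])
# model.fit(train_ds, validation_data=val_ds, epochs=10)
```

The same head and two-stage schedule can be reused for any of the other seven backbones by swapping `backbone_fn`, which is what keeps the per-model comparison in the abstract consistent.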