Copyright © 2026. Authors retain the copyright of this article. This article is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{196367,
  author   = {Gopavarapu Sri Rama Krishna Vamsi and Nagababu Pachhala and Garlapati Ganesh and Busi Vineeth Kumar and Guntur Jaswanth},
  title    = {Deepfake Detection Using {ViT-BiLSTM} Based Face Analysis},
  journal  = {International Journal of Innovative Research in Technology},
  year     = {2026},
  month    = apr,
  volume   = {12},
  number   = {11},
  pages    = {2520--2527},
  issn     = {2349-6002},
  url      = {https://ijirt.org/article?manuscript=196367},
  abstract = {Deepfake videos have become increasingly realistic due to advancements in generative models, posing significant challenges for digital media authenticity and security. This paper presents a hybrid deep learning framework for deepfake video detection that integrates Vision Transformer (ViT)-based spatial feature extraction with Bidirectional Long Short-Term Memory (BiLSTM) temporal modelling. The proposed system processes videos through frame extraction, face detection using Retina Face with MTCNN as fallback, identity-consistent face tracking using a multi-criterion approach, and feature extraction using a pretrained ViT model. The extracted embeddings are analysed using a BiLSTM network to capture temporal inconsistencies across frames. The model is trained on Face Forensics++ and CelebDF datasets and evaluated using both internal and cross-dataset validation. Experimental results show that the proposed approach achieves high accuracy, with a balanced accuracy of 93.5% and ROC-AUC of 0.987 on the internal test set, demonstrating strong discriminative capability and low false-positive rates. However, performance on SDFVD dataset indicates a reduction in generalization due to domain shift. The proposed system provides an effective and reliable solution for deepfake detection, particularly in forensic applications where minimizing false positives is critical, and highlights the need for improved cross-dataset robustness in future research.},
  keywords = {Deepfake Detection, Vision Transformer (ViT), Bidirectional LSTM (BiLSTM), Temporal Analysis, Multimedia Forensics, Face Tracking.},
}
Submit your research paper and those of your network (friends, colleagues, or peers) through your IPN account, and receive 800 INR for each paper that gets published.
Join Now. National Conference on Sustainable Engineering and Management - 2024. Last Date: 15th March 2024.
Submit inquiry