Copyright © 2026. The authors retain the copyright of this article. It is an open access article distributed under the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited.
@article{194646,
author = {Tathagat and Md Aftab Alam and Suhel Ansari and Sanjay Choudhry and Tanya Shisodiya},
title = {Context Window Degradation in a Resource-Constrained Language Model: Empirical Evidence from MiniLLM},
journal = {International Journal of Innovative Research in Technology},
year = {2026},
volume = {12},
number = {10},
pages = {4757-4764},
issn = {2349-6002},
url = {https://ijirt.org/article?manuscript=194646},
abstract = {We present an empirical study of context window degradation in MiniLLM, a 57.5 million parameter GPT-style transformer trained from scratch on approximately 150 million tokens using a single consumer-grade GPU (NVIDIA RTX 3050, 6 GB). We conduct two experiments across four fine-tuned checkpoints (QA, farming, story, poetry): a positional recall probe measuring factual retrieval accuracy across three context lengths and three positions, and a multi-turn perplexity evaluation across eight conversation turns. Our results establish five findings. First, the lost-in-the-middle effect is present at 57.5M scale: middle-positioned facts in the QA checkpoint degrade from 30% to 15% soft-match accuracy at 350 tokens. Second, positional recall does not generalise across checkpoints: all non-QA checkpoints score near 0%, establishing that factual recall from context is a QA-specific capability rather than a general property of the base model. Third, all four checkpoints exhibit multi-turn perplexity degradation, but the timing and severity differ systematically with fine-tuning domain: QA and farming collapse at Turn 6, while story and poetry exhibit high baseline perplexity from Turn 1 due to training-distribution mismatch. Fourth, factual-domain checkpoints follow a two-phase collapse trajectory: an initial uncertainty phase followed by partial perplexity recovery, consistent with retreat to a low-perplexity fluency attractor. Fifth, the effective usable context is approximately 80–200 tokens rather than the nominal 512-token window. All code, checkpoints, and evaluation scripts are publicly available.},
month = {March},
}
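
The first experiment named in the abstract, the positional recall probe, embeds a known fact at the start, middle, or end of padded context and scores retrieval with a lenient soft match. What follows is a minimal sketch of such a probe, not the authors' released evaluation script: the model.generate(prompt) call and the build_context helper are hypothetical stand-ins for whatever interface MiniLLM exposes.

# Sketch of a positional recall probe (lost-in-the-middle test).
# model.generate is a hypothetical interface, not MiniLLM's actual API.

CONTEXT_LENGTHS = [150, 250, 350]      # assumed token budgets; the abstract reports the 350-token case
POSITIONS = ["start", "middle", "end"]

def soft_match(output: str, expected: str) -> bool:
    # Lenient scoring: the expected answer appears anywhere in the output.
    return expected.lower() in output.lower()

def build_context(fact: str, filler: str, n_tokens: int, position: str) -> str:
    # Crude whitespace tokenisation: pad with filler words to roughly
    # n_tokens, placing the fact at the requested position.
    pad = ((filler + " ") * n_tokens).split()[: max(n_tokens - len(fact.split()), 0)]
    if position == "start":
        words = fact.split() + pad
    elif position == "end":
        words = pad + fact.split()
    else:  # middle
        half = len(pad) // 2
        words = pad[:half] + fact.split() + pad[half:]
    return " ".join(words)

def recall_accuracy(model, probes, filler):
    # probes: list of (fact, question, expected_answer) triples
    scores = {}
    for n in CONTEXT_LENGTHS:
        for pos in POSITIONS:
            hits = 0
            for fact, question, expected in probes:
                prompt = build_context(fact, filler, n, pos) + "\n" + question
                hits += soft_match(model.generate(prompt), expected)  # hypothetical call
            scores[(n, pos)] = hits / len(probes)
    return scores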
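
The second experiment tracks perplexity turn by turn as dialogue accumulates, which is how effects like the abstract's Turn-6 collapse and high Turn-1 baselines would be observed. Here is a sketch of per-turn perplexity for a generic causal LM, assuming model(ids) returns logits of shape (batch, seq, vocab) and tokenizer.encode returns a list of token ids; both interfaces are assumptions, not MiniLLM's actual API.

import math
import torch

@torch.no_grad()
def turn_perplexities(model, tokenizer, turns, device="cpu"):
    # Perplexity of each successive turn, conditioned on the accumulated
    # dialogue history. Interfaces for `model` and `tokenizer` are assumed.
    history: list[int] = []
    ppls = []
    for turn in turns:
        ids = history + tokenizer.encode(turn)
        x = torch.tensor([ids], device=device)
        logp = torch.log_softmax(model(x)[0], dim=-1)  # (len(ids), vocab)
        # Token at position t is predicted by logits at position t-1, so
        # score only the new turn's tokens (skipping an unconditioned first token).
        start = max(len(history), 1)
        nll = 0.0
        for t in range(start, len(ids)):
            nll -= logp[t - 1, ids[t]].item()
        ppls.append(math.exp(nll / (len(ids) - start)))
        history = ids  # this turn becomes context for the next one
    return ppls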