%Aigaion2 BibTeX export from Idiap Publications
%Friday 05 December 2025 09:50:12 AM

@inproceedings{Kumar_NEURIPS2025_2025,
                      author = {Kumar, Shashi and Kaloga, Yacouba and Mitros, John and Motlicek, Petr and Kodrasi, Ina},
                    keywords = {fvae-lora, latent space factorization, LoRA, low-rank adaptation, spurious correlation robustness},
                    projects = {UNIPHORE, ELOQUENCE, ChaSpeePro},
         mainresearchprogram = {Human-AI Teaming},
  additionalresearchprograms = {AI for Everyone},
                       month = dec,
                       title = {Latent Space Factorization in {LoRA}},
                   booktitle = {39th Conference on Neural Information Processing Systems},
                        year = {2025},
                      eprint = {2510.19640},
               archiveprefix = {arXiv},
                         url = {https://arxiv.org/abs/2510.19640},
                    abstract = {Low-rank adaptation (LoRA) is a widely used method for parameter-efficient finetuning.
However, existing LoRA variants lack mechanisms to explicitly disambiguate task-relevant information within the learned low-rank subspace, potentially limiting downstream performance.
We propose Factorized Variational Autoencoder LoRA (FVAE-LoRA), which leverages a VAE to learn two distinct latent spaces.
Our novel Evidence Lower Bound formulation explicitly promotes factorization between the latent spaces, dedicating one latent space to task-salient features and the other to residual information.
Extensive experiments on text, audio, and image tasks demonstrate that FVAE-LoRA consistently outperforms standard LoRA.
Moreover, spurious correlation evaluations confirm that FVAE-LoRA better isolates task-relevant signals, leading to improved robustness under distribution shifts.
Our code is publicly available at: \url{https://github.com/idiap/FVAE-LoRA}},
                         pdf = {https://publications.idiap.ch/attachments/papers/2025/Kumar_NEURIPS2025_2025.pdf},
}