%Aigaion2 BibTeX export from Idiap Publications
%Thursday 04 December 2025 04:55:21 PM
@INPROCEEDINGS{Vitek_IJCB_2025,
author = {Vitek, Matej and Toma{\v s}evi{\'{c}}, Darian and Das, Abhijit and Nathan, Sabari and {\"{O}}zbulak, G{\"{o}}khan and Tataro{\u{g}}lu {\"{O}}zbulak, G{\"{o}}zde Ay{\c{s}}e and Calbimonte, Jean-Paul and Anjos, Andr{\'{e}} and Hemant Bhatt, Hariohm and Dhirendra Premani, Dhruv and Chaudhari, Jay and Wang, Caiyong and Jiang, Jian and Zhang, Chi and Zhang, Qi and Iyappan Ganapathi, Iyyakutti and Sadaf Ali, Syed and Velayudan, Divya and Assefa, Maregu and Werghi, Naoufel and A Daniels, Zachary and John, Leeon and Vyas, Ritesh and Nourmohammadi Khiarak, Jalil and Akbari Saeed, Taher and Nasehi, Mahsa and Kianfar, Ali and Pashazadeh Panahi, Mobina and Sharma, Geetanjali and Raj Panth, Pushp and Ramachandra, Raghavendra and Nigam, Aditya and Pal, Umapada and Pedrini, Helio and Struc, Vitomir},
projects = {FAIRMI},
mainresearchprogram = {AI for Everyone},
additionalresearchprograms = {AI for Life},
title = {Privacy-enhancing Sclera Segmentation Benchmarking Competition: SSBC 2025},
booktitle = {International Joint Conference on Biometrics},
year = {2025},
publisher = {IEEE},
abstract = {This paper presents a summary of the 2025 Sclera Segmentation Benchmarking Competition (SSBC), which focused on the development of privacy-preserving sclera-segmentation models trained using synthetically generated ocular images. The goal of the competition was to evaluate how well models trained on synthetic data perform in comparison to those trained on real-world datasets. The competition featured two tracks: one relying solely on synthetic data for model development, and one combining synthetic data with a limited amount of real-world data. A total of nine research groups submitted diverse segmentation models, employing a variety of architectural designs, including transformer-based solutions, lightweight models, and segmentation networks guided by generative frameworks. Experiments were conducted across three evaluation datasets containing both synthetic and real-world images, collected under diverse conditions. Results show that models trained entirely on synthetic data can achieve competitive performance, particularly when dedicated training strategies are employed, as evidenced by the top-performing models, which achieved scores of over in the synthetic data track. Moreover, performance gains in the mixed track were often driven more by methodological choices than by the inclusion of real data, highlighting the promise of synthetic data for privacy-aware biometric development. The code and data for the competition are available at: https://github.com/dariant/SSBC_2025.}
}