%Aigaion2 BibTeX export from Idiap Publications
%Saturday 21 December 2024 05:20:16 PM

@ARTICLE{Kotwal_IEEETBIOM_2022,
  author = {Kotwal, Ketan and Bhattacharjee, Sushil and Abbet, Philip and Mostaani, Zohreh and Wei, Huang and Wenkang, Xu and Yaxi, Zhao and Marcel, S{\'{e}}bastien},
  keywords = {CNN, domain adaptation, face presentation attack detection (PAD), near-infrared},
  projects = {Idiap},
  title = {Domain-Specific Adaptation of CNN for Detecting Face Presentation Attacks in NIR},
  journal = {IEEE Transactions on Biometrics, Behavior, and Identity Science},
  year = {2022},
  abstract = {As the automotive industry moves towards personalized applications and experiences, the identification of the person inside the vehicle becomes necessary, and it must be carried out in a secure manner. In this paper, we propose a face presentation attack detection (PAD) system for operation inside a passenger vehicle. A typical in-vehicle face PAD system must function under several constraints, such as bounded sensing (imaging) capabilities, limited computing resources on embedded devices, real-time inference, and, essentially, very high accuracy. In this work, we develop a face PAD system for the automotive domain, relying on a single NIR camera, to continually verify whether the driver's face is bona fide or not. Our work has two main contributions. First, a lightweight face PAD framework has been developed using a 9-layer convolutional neural network (CNN). With its compact size and limited set of operators, it can be deployed on a resource-constrained embedded device to achieve near real-time inference. To alleviate the problem of limited training data (face PAD in NIR) for a given system, we develop an efficient mechanism to obtain this CNN through a combination of adaptation of domain-specific layers and task-specific fine-tuning of a base CNN. As the second contribution, we collect a large face PAD dataset with 5800+ videos, acquired under NIR (940 nm) illumination, for in-vehicle use-cases. This dataset, named VFPAD, captures several real-world variations in terms of environmental settings, illumination, subject's pose, and appearance. Using the VFPAD dataset, we demonstrate that the proposed face PAD method achieves very high performance (overall accuracy ≈ 98.0\%) and also outperforms several baseline face PAD methods. The dataset will be shared with the wider scientific community for research purposes.},
  pdf = {https://publications.idiap.ch/attachments/papers/2022/Kotwal_IEEETBIOM_2022.pdf}
}