%Aigaion2 BibTeX export from Idiap Publications
%Monday 30 December 2024 06:29:50 PM

@PHDTHESIS{Unnervik_THESIS_2024,
         author = {Unnervik, Alexander},
         editor = {Marcel, S{\'{e}}bastien},
       keywords = {anomaly detection, backdoor attack, backdoor attack detection, convolutional neural network, embedding translation, face recognition, model pairing, trojan attack},
       projects = {Idiap, TRESPASS-ETN},
          month = jun,
          title = {Performing And Detecting Backdoor Attacks on Face Recognition Algorithms},
           year = {2024},
         school = {Ecole Polytechnique F{\'{e}}d{\'{e}}rale de Lausanne},
       abstract = {The field of biometrics, and especially face recognition, has seen widespread adoption in recent years, from access control on personal devices such as phones and laptops to automated border control at airports. The stakes for these applications are increasingly high, and so are the risks of succumbing to attacks. More sophisticated algorithms typically require more data samples and larger models, which in turn demand more compute and expertise. These factors increasingly turn deep learning algorithms into a service provided by third parties, which means relinquishing control and oversight of these algorithms.

When so much depends on these models working correctly, and nefarious actors stand to gain so much from circumventing them, how does one verify their integrity? This conundrum of integrity is at the heart of the work presented here.

One way in which face recognition algorithms (or, more generally, deep learning algorithms) fail is by being vulnerable to backdoor attacks (BA): a type of attack in which the training set or the network weights are modified so as to control the output behavior when the network is exposed to specific samples. Detecting these backdoored networks, which we refer to as backdoor attack detection (BAD), is a challenging task and still an active field of research, particularly given the constraints under which the literature addresses the problem (e.g. little to no consideration of open-set classification algorithms).

In this thesis, we demonstrate that BAs can be performed on large face recognition algorithms and further the state of the art in BAD through the following contributions: first, we study the vulnerability of face recognition algorithms to backdoor attacks and characterize backdoor attack success with respect to the choice of identities and other variables. Second, we propose a first method for detecting backdoor attacks by studying the weight distributions of clean models and comparing an unknown model against those distributions; this method is based on the principle of anomaly detection. Third, we propose a method for safely deploying models that exploits their clean behavior while detecting the activation of backdoors, a technique we call model pairing.},
            pdf = {https://publications.idiap.ch/attachments/papers/2024/Unnervik_THESIS_2024.pdf}
}