%Aigaion2 BibTeX export from Idiap Publications
%Thursday 21 November 2024 04:55:25 PM

@ARTICLE{Korshunov_TBIOM_2021,
         author = {Korshunov, Pavel and Marcel, S{\'{e}}bastien},
       projects = {Idiap, Verifake, Biometrics Center},
          month = dec,
          title = {Improving Generalization of Deepfake Detection with Data Farming and Few-Shot Learning},
        journal = {IEEE Transactions on Biometrics, Behavior, and Identity Science},
           year = {2021},
        abstract = {Recent advances in automated video and audio editing tools, generative adversarial networks (GANs), and social media allow the creation and fast dissemination of high-quality tampered videos, which are generally called deepfakes. Typically, in these videos, a face is swapped with someone else's using GANs. Accessible open-source software and apps for face swapping have led to a wide and rapid dissemination of the generated deepfakes, posing a significant technical challenge for their detection and filtering. In response to the threat that deepfake videos pose to our trust in video evidence, several large datasets of deepfake videos and several methods to detect them were proposed recently. However, the proposed methods suffer from overfitting to the training data and a lack of generalization across different databases and generative models. Therefore, in this paper, we investigate techniques for improving the generalization of deepfake detection methods that can be employed in practical settings. We selected two popular state-of-the-art deepfake detectors, based on the Xception and EfficientNet models, and five databases: the dataset from Google and Jigsaw, FaceForensics++, DeeperForensics, Celeb-DF, and our own publicly available large dataset DF-Mobio. To improve generalization, we apply different augmentation strategies during training, including a proposed aggressive `data farming' technique based on random patches. We also tested two few-shot tuning methods, in which either the first convolutional layer or the last layer of a pre-trained model is tuned on 100 seconds of data from the training set of the test database. The experimental results clearly expose the generalization problem of deepfake detection methods, since the accuracy drops significantly when a model is trained on one dataset and evaluated on another. However, the silver lining is that aggressive augmentation during training and few-shot tuning on the test database can improve the accuracy of the detection methods in a cross-database scenario. As a side observation, we show the importance of database selection for training and evaluation: FaceForensics++ is found to be better suited for training, while DeeperForensics is significantly more challenging as a test database.},
            pdf = {https://publications.idiap.ch/attachments/papers/2021/Korshunov_TBIOM_2021.pdf}
}
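
% Illustrative sketch (not the authors' code): the abstract above describes few-shot
% tuning in which either the first convolutional layer or the last layer of a
% pre-trained detector is tuned on a small amount of data from the test database.
% The PyTorch snippet below only illustrates that general idea; the EfficientNet-B0
% backbone, layer indices, and hyper-parameters are assumptions for illustration.
import torch
import torch.nn as nn
from torchvision.models import efficientnet_b0

# Load an ImageNet-pretrained EfficientNet-B0 (torchvision >= 0.13 weights API).
model = efficientnet_b0(weights="DEFAULT")

# Replace the 1000-class ImageNet head with a single real-vs-fake logit.
model.classifier[1] = nn.Linear(model.classifier[1].in_features, 1)

# Few-shot variant A: freeze everything except the last (classifier) layer.
for p in model.parameters():
    p.requires_grad = False
for p in model.classifier[1].parameters():
    p.requires_grad = True

# Few-shot variant B (alternative): tune only the first convolutional layer.
# first_conv = model.features[0][0]   # Conv2d stem of EfficientNet-B0
# for p in first_conv.parameters():
#     p.requires_grad = True

# Only the unfrozen parameters are optimized; the small tuning set (e.g. frames
# covering ~100 seconds of video from the test database's training split) would
# then be fed through a standard binary-classification training loop.
optimizer = torch.optim.Adam(
    (p for p in model.parameters() if p.requires_grad), lr=1e-4)
criterion = nn.BCEWithLogitsLoss()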