%Aigaion2 BibTeX export from Idiap Publications
%Saturday 21 December 2024 06:09:06 PM
@inproceedings{Apicella_ICCV_ACVR_2023,
  author    = {Apicella, Tommaso and Xompero, Alessio and Ragusa, Edoardo and Berta, Riccardo and Cavallaro, Andrea and Gastaldo, Paolo},
  title     = {Affordance Segmentation of Hand-Occluded Containers from Exocentric Images},
  booktitle = {Proceedings of the {IEEE/CVF} International Conference on Computer Vision Workshops ({ICCVW})},
  year      = {2023},
  doi       = {10.1109/ICCVW60793.2023.00204},
  url       = {https://ieeexplore.ieee.org/document/10350499},
  keywords  = {Affordances, computer vision, Convolutional Neural Networks, Fuses, Geometry, Hand occlusion, image segmentation, training, virtual reality, Visualization},
  projects  = {Idiap},
  abstract  = {Visual affordance segmentation identifies the surfaces of an object an agent can interact with. Common challenges for the identification of affordances are the variety of the geometry and physical properties of these surfaces as well as occlusions. In this paper, we focus on occlusions of an object that is hand-held by a person manipulating it. To address this challenge, we propose an affordance segmentation model that uses auxiliary branches to process the object and hand regions separately. The proposed model learns affordance features under hand-occlusion by weighting the feature map through hand and object segmentation. To train the model, we annotated the visual affordances of an existing dataset with mixed-reality images of hand-held containers in third-person (exocentric) images. Experiments on both real and mixed-reality images show that our model achieves better affordance segmentation and generalisation than existing models.},
}