%Aigaion2 BibTeX export from Idiap Publications
%Saturday 21 December 2024 05:03:03 PM
@INPROCEEDINGS{Pinheiro_ECCV_2016,
         author = {Pinheiro, Pedro H. O. and Lin, Tsung-Yi and Collobert, Ronan and Dollar, Piotr},
       projects = {Idiap},
          month = sep,
          title = {Learning to Refine Object Segments},
      booktitle = {Computer Vision - ECCV 2016},
         series = {Lecture Notes in Computer Science},
         volume = {9905},
           year = {2016},
          pages = {75--91},
      publisher = {Springer},
       location = {Amsterdam},
           issn = {0302-9743},
           isbn = {978-3-319-46448-0},
            url = {http://link.springer.com/chapter/10.1007/978-3-319-46448-0_5},
            doi = {10.1007/978-3-319-46448-0_5},
       abstract = {Object segmentation requires both object-level information and low-level pixel data. This presents a challenge for feedforward networks: lower layers in convolutional nets capture rich spatial information, while upper layers encode object-level knowledge but are invariant to factors such as pose and appearance. In this work we propose to augment feedforward nets for object segmentation with a novel top-down refinement approach. The resulting bottom-up/top-down architecture is capable of efficiently generating high-fidelity object masks. Similarly to skip connections, our approach leverages features at all layers of the net. Unlike skip connections, our approach does not attempt to output independent predictions at each layer. Instead, we first output a coarse `mask encoding' in a feedforward pass, then refine this mask encoding in a top-down pass utilizing features at successively lower layers. The approach is simple, fast, and effective. Building on the recent DeepMask network for generating object proposals, we show accuracy improvements of 10--20\% in average recall for various setups. Additionally, by optimizing the overall network architecture, our approach, which we call SharpMask, is 50\% faster than the original DeepMask network (under .8s per image).},
            pdf = {https://publications.idiap.ch/attachments/papers/2016/Pinheiro_ECCV_2016.pdf}
}