@inproceedings{1ad767ae8c2b4a2997be00cc6b763859,
  title     = {Learning Visual Context by Comparison},
  abstract  = {Finding diseases from an X-ray image is an important yet highly challenging task. Current methods for solving this task exploit various characteristics of the chest X-ray image, but one of the most important characteristics is still missing: the necessity of comparison between related regions in an image. In this paper, we present Attend-and-Compare Module (ACM) for capturing the difference between an object of interest and its corresponding context. We show that explicit difference modeling can be very helpful in tasks that require direct comparison between locations from afar. This module can be plugged into existing deep learning models. For evaluation, we apply our module to three chest X-ray recognition tasks and COCO object detection \& segmentation tasks and observe consistent improvements across tasks. The code is available at https://github.com/mk-minchul/attend-and-compare.},
  keywords  = {Attention mechanism, Chest X-ray, Context modeling},
  author    = {Kim, Minchul and Park, Jongchan and Na, Seil and Park, Chang Min and Yoo, Donggeun},
  note      = {Publisher Copyright: {\textcopyright} 2020, Springer Nature Switzerland AG.; Conference date: 23-08-2020 Through 28-08-2020},
  year      = {2020},
  doi       = {10.1007/978-3-030-58558-7_34},
  language  = {English},
  isbn      = {9783030585570},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  pages     = {576--592},
  editor    = {Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael},
  booktitle = {Computer Vision -- {ECCV} 2020 - 16th European Conference, Proceedings},
  address   = {Germany},
}