@inproceedings{602bc3e585984286bba4f0a3f726f113,
  author    = {Sokeh, {Hajar Sadeghi} and Stephen Gould and Jochen Renz},
  title     = {Determining interacting objects in human-centric activities via qualitative spatio-temporal reasoning},
  abstract  = {Understanding the activities taking place in a video is a challenging problem in Artificial Intelligence. Complex video sequences contain many activities and involve a multitude of interacting objects. Determining which objects are relevant to a particular activity is the first step in understanding the activity. Indeed many objects in the scene are irrelevant to the main activity taking place. In this work, we consider human-centric activities and look to identify which objects in the scene are involved in the activity. We take an activity-agnostic approach and rank every moving object in the scene with how likely it is to be involved in the activity. We use a comprehensive spatio-temporal representation that captures the joint movement between humans and each object. We then use supervised machine learning techniques to recognize relevant objects based on these features. Our approach is tested on the challenging Mind{\textquoteright}s Eye dataset.},
  booktitle = {Computer Vision - ACCV 2014 - 12th Asian Conference on Computer Vision, Revised Selected Papers},
  editor    = {Daniel Cremers and Hideo Saito and Ian Reid and Ming-Hsuan Yang},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  publisher = {Springer International Publishing},
  address   = {Cham},
  pages     = {550--563},
  year      = {2015},
  doi       = {10.1007/978-3-319-16814-2_36},
  language  = {English},
  note      = {Publisher Copyright: {\textcopyright} Springer International Publishing Switzerland 2015.; 12th Asian Conference on Computer Vision, ACCV 2014 ; Conference date: 01-11-2014 Through 05-11-2014},
}