%Aigaion2 BibTeX export from Idiap Publications
%Saturday 21 December 2024 06:25:46 PM

@ARTICLE{Varadarajan_IJCV_2012,
         author = {Varadarajan, Jagannadan and Emonet, Remi and Odobez, Jean-Marc},
       keywords = {Unsupervised {\textperiodcentered} Latent sequential patterns {\textperiodcentered} Topic models {\textperiodcentered} PLSA {\textperiodcentered} Video surveillance {\textperiodcentered} Activity analysis},
       projects = {Idiap, HAI, VANAHEIM},
          month = may,
          title = {A Sequential Topic Model for Mining Recurrent Activities from Long Term Video Logs},
        journal = {International Journal of Computer Vision},
         volume = {103},
         number = {1},
           year = {2013},
          pages = {100--126},
       abstract = {This paper introduces a novel probabilistic activity modeling approach that mines recurrent sequential patterns, called motifs, from documents given as word{$\times$}time count matrices (e.g., videos). In this model, documents are represented as a mixture of sequential activity patterns (our motifs), where the mixing weights are defined by the motif starting-time occurrences. The novelties are multifold. First, unlike previous approaches where topics modeled only the co-occurrence of words at a given time instant, our motifs model the co-occurrence and the temporal order in which the words occur within a temporal window. Second, unlike traditional Dynamic Bayesian Networks (DBN), our model accounts for the important case where activities occur concurrently in the video (but not necessarily in synchrony), i.e., the occurrences of activity motifs can overlap. The learning of the motifs in these difficult situations is made possible by the introduction of latent variables representing the activity starting times, enabling us to implicitly align the occurrences of the same pattern during the joint inference of the motifs and their starting times. As a third novelty, we propose a general method that favors the recovery of sparse distributions, a highly desirable property in many topic model applications, by adding simple regularization constraints on the searched distributions to the data likelihood optimization criterion. We substantiate our claims with experiments on synthetic data to demonstrate the algorithm's behavior, and on four video datasets with significant variations in their activity content obtained from static cameras. We observe that, using low-level motion features from videos, our algorithm is able to capture sequential patterns that implicitly represent typical trajectories of scene objects.},
            pdf = {https://publications.idiap.ch/attachments/papers/2012/Varadarajan_IJCV_2012.pdf}
}