@inproceedings{discovery1468724,
          series = {IEEE Conference on Computer Vision and Pattern Recognition},
            year = {2015},
           title = {Modeling Object Appearance using Context-Conditioned Component Analysis},
            note = {This version is the author accepted manuscript. For information on re-use, please refer to the publisher's terms and conditions.},
           pages = {4156--4164},
       booktitle = {Proceedings of the 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
       publisher = {IEEE},
          volume = {2015},
           month = {October},
         address = {Boston, MA, USA},
        keywords = {Visualization, Mathematical model, Context, Context modeling, Active appearance model, Image color analysis, Analytical models},
        abstract = {Subspace models have been very successful at modeling the appearance of structured image datasets when the visual objects have been aligned in the images (e.g., faces).
Even with extensions that allow for global transformations or dense warps of the image, the set of visual objects whose appearance may be modeled by such methods is limited.
They are unable to account for visual objects where occlusion leads to changing visibility of different object parts (without a strict layered structure) and where a one-to-one mapping between parts is not preserved. For example, bunches of bananas contain different numbers of bananas, but each individual banana shares an appearance subspace.
In this work we remove the image-space alignment limitations of existing subspace models by conditioning the models on a shape-dependent context that allows the complex, non-linear structure of the appearance of the visual object to be captured and shared. This allows us to exploit the advantages of subspace appearance models with non-rigid, deformable objects whilst also dealing with complex occlusions and varying numbers of parts. We demonstrate the effectiveness of our new model with examples of structured inpainting and appearance transfer.},
             url = {http://dx.doi.org/10.1109/CVPR.2015.7299043},
          author = {Turmukhambetov, D. and Campbell, N. D. F. and Prince, S. J. D. and Kautz, J.},
            issn = {1063-6919}
}