@incollection{bengio-lecun-07,
    abstract = {One long-term goal of machine learning research is to produce methods that
are applicable to highly complex tasks, such as perception (vision, audition),
reasoning, intelligent control, and other artificially intelligent behaviors. We argue
that in order to progress toward this goal, the Machine Learning community must
endeavor to discover algorithms that can learn highly complex functions, with
minimal need for prior knowledge, and with minimal human intervention. We present
mathematical and empirical evidence suggesting that many popular approaches
to non-parametric learning, particularly kernel methods, are fundamentally
limited in their ability to learn complex high-dimensional functions. Our analysis
focuses on two problems. First, kernel machines are shallow architectures, in
which one large layer of simple template matchers is followed by a single layer
of trainable coefficients. We argue that shallow architectures can be very
inefficient in terms of required number of computational elements and examples.
Second, we analyze a limitation of kernel machines with a local kernel, linked to the
curse of dimensionality, that applies to supervised, unsupervised (manifold
learning) and semi-supervised kernel machines. Using empirical results on invariant
image recognition tasks, kernel methods are compared with deep architectures, in
which lower-level features or concepts are progressively combined into more
abstract and higher-level representations. We argue that deep architectures have the
potential to generalize in non-local ways, i.e., beyond immediate neighbors, and
that this is crucial in order to make progress on the kind of complex tasks required
for artificial intelligence.},
    author = {Bengio, Yoshua and LeCun, Yann},
    booktitle = {Large-Scale Kernel Machines},
    citeulike-article-id = {2492447},
    citeulike-linkout-0 = {http://yann.lecun.com/exdb/publis/index.html\#bengio-lecun-07},
    comment = {Deep learning architectures},
    editor = {Bottou, L{\'e}on and Chapelle, Olivier and DeCoste, Dennis and Weston, Jason},
    keywords = {deep, deep\_learning, deep\_learning\_architectures},
    posted-at = {2008-03-09 04:20:46},
    priority = {0},
    publisher = {MIT Press},
    title = {Scaling Learning Algorithms Towards {AI}},
    url = {http://yann.lecun.com/exdb/publis/index.html\#bengio-lecun-07},
    year = {2007}
}

@techreport{bengio-07,
    abstract = {Theoretical results strongly suggest that in order to learn the kind of
complicated functions that can represent high-level abstractions (e.g. in
vision, language, and other AI-level tasks), one may need deep
architectures. Deep architectures are composed of multiple levels of non-linear
operations, such as in neural nets with many hidden layers. Searching the
parameter space of deep architectures is a difficult optimization task, but
learning algorithms such as those for Deep Belief Networks have recently been proposed
to tackle this problem with notable success, beating the state-of-the-art
in certain areas. This paper discusses the motivations and principles regarding
learning algorithms for deep architectures and in particular for those based
on unsupervised learning such as Deep Belief Networks, using as building
blocks single-layer models such as Restricted Boltzmann Machines.},
    author = {Bengio, Yoshua},
    citeulike-article-id = {3196377},
    citeulike-linkout-0 = {http://www.iro.umontreal.ca/\~{}lisa/pointeurs/TR1312.pdf},
    citeulike-linkout-1 = {http://www.iro.umontreal.ca/\~{}lisa/publications/?page=publication\&kind=single\&ID=209},
    institution = {Dept. IRO, Universit{\'e} de Montr{\'e}al},
    keywords = {deep\_learning\_architectures},
    number = {1312},
    posted-at = {2008-09-04 22:58:46},
    priority = {5},
    title = {Learning Deep Architectures for {AI}},
    url = {http://www.iro.umontreal.ca/\~{}lisa/pointeurs/TR1312.pdf},
    year = {2007}
}


