% Encoding: UTF-8
@article{dvs128,
  author   = {Lichtsteiner, Patrick and Posch, Christoph and Delbruck, Tobi},
  title    = {A 128$\times$128 120 {dB} 15 $\mu$s Latency Asynchronous Temporal Contrast Vision Sensor},
  journal  = {IEEE Journal of Solid-State Circuits},
  year     = {2008},
  volume   = {43},
  number   = {2},
  pages    = {566--576},
  month    = feb,
  doi      = {10.1109/JSSC.2007.914337},
  issn     = {1558-173X},
  keywords = {CMOS image sensors;latency asynchronous temporal contrast vision sensor;CMOS vision sensor;spike event;address-event representation;active continuous-time front-end logarithmic photoreceptor;self-timed switched-capacitor differencing circuit;chip power consumption;silicon retina;image sensor;bandwidth 3 kHz;power 23 mW;time 15 mus;size 0.35 mum;Delay;Layout;Sensor arrays;Lighting;Sensor phenomena and characterization;Bandwidth;Dynamic range;Streaming media;Reflectivity;Timing;Address-event representation (AER);asynchronous vision sensor;high-speed imaging;image sensors;machine vision;neural network hardware;neuromorphic circuit;robot vision systems;visual system;wide dynamic range imaging},
}

@article{paredes2019unsupervised,
  title     = {Unsupervised learning of a hierarchical spiking neural network for optical flow estimation: From events to global motion perception},
  author    = {Paredes-Vall{\'e}s, Federico and Scheper, Kirk Yannick Willehm and De Croon, Guido Cornelis Henricus Eugene},
  journal   = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  year      = {2019},
  publisher = {IEEE},
  internal-note = {Same work as arXiv entry valles2018; cite only one of the two},
}

@article{haessig2018spiking,
  title     = {Spiking optical flow for event-based sensors using {IBM}'s {TrueNorth} neurosynaptic system},
  author    = {Haessig, Germain and Cassidy, Andrew and Alvarez, Rodrigo and Benosman, Ryad and Orchard, Garrick},
  journal   = {IEEE Transactions on Biomedical Circuits and Systems},
  volume    = {12},
  number    = {4},
  pages     = {860--870},
  year      = {2018},
  publisher = {IEEE},
}

@article{maass1997networks,
  title     = {Networks of spiking neurons: The third generation of neural network models},
  author    = {Maass, Wolfgang},
  journal   = {Neural Networks},
  volume    = {10},
  number    = {9},
  pages     = {1659--1671},
  year      = {1997},
  publisher = {Elsevier},
}
@article{merolla2014million,
  author    = {Merolla, Paul A and Arthur, John V and Alvarez-Icaza, Rodrigo and Cassidy, Andrew S and Sawada, Jun and Akopyan, Filipp and Jackson, Bryan L and Imam, Nabil and Guo, Chen and Nakamura, Yutaka and others},
  title     = {A million spiking-neuron integrated circuit with a scalable communication network and interface},
  journal   = {Science},
  year      = {2014},
  volume    = {345},
  number    = {6197},
  pages     = {668--673},
  publisher = {American Association for the Advancement of Science},
}

@inproceedings{huh2018gradient,
  author    = {Huh, Dongsung and Sejnowski, Terrence J},
  title     = {Gradient descent for spiking neural networks},
  booktitle = {Advances in Neural Information Processing Systems},
  year      = {2018},
  pages     = {1433--1443},
}

@article{lee2018deep,
  author    = {Lee, Chankyu and Srinivasan, Gopalakrishnan and Panda, Priyadarshini and Roy, Kaushik},
  title     = {Deep Spiking Convolutional Neural Network Trained with Unsupervised Spike Timing Dependent Plasticity},
  journal   = {IEEE Transactions on Cognitive and Developmental Systems},
  year      = {2018},
  publisher = {IEEE},
}

@article{kheradpisheh2016stdp,
  title         = {{STDP}-based spiking deep neural networks for object recognition},
  author        = {Kheradpisheh, Saeed Reza and Ganjtabesh, Mohammad and Thorpe, Simon J and Masquelier, Timoth{\'e}e},
  journal       = {arXiv preprint arXiv:1611.01421},
  year          = {2016},
  archivePrefix = {arXiv},
  eprint        = {1611.01421},
}


@article{burkitt2006review,
  title     = {A review of the integrate-and-fire neuron model: {I}. {Homogeneous} synaptic input},
  author    = {Burkitt, Anthony N},
  journal   = {Biological Cybernetics},
  volume    = {95},
  number    = {1},
  pages     = {1--19},
  year      = {2006},
  publisher = {Springer},
}
@inproceedings{geiger2012we,
  title        = {Are we ready for autonomous driving? {The} {KITTI} vision benchmark suite},
  author       = {Geiger, Andreas and Lenz, Philip and Urtasun, Raquel},
  booktitle    = {2012 IEEE Conference on Computer Vision and Pattern Recognition},
  pages        = {3354--3361},
  year         = {2012},
  organization = {IEEE},
}
@inproceedings{lin2016fixed,
  author    = {Lin, Darryl and Talathi, Sachin and Annapureddy, Sreekanth},
  title     = {Fixed point quantization of deep convolutional networks},
  booktitle = {International Conference on Machine Learning},
  year      = {2016},
  pages     = {2849--2858},
}

@inproceedings{han2015learning,
  title     = {Learning both weights and connections for efficient neural network},
  author    = {Han, Song and Pool, Jeff and Tran, John and Dally, William},
  booktitle = {Advances in Neural Information Processing Systems},
  pages     = {1135--1143},
  year      = {2015},
}

@inproceedings{meister2018unflow,
  title     = {{UnFlow}: Unsupervised learning of optical flow with a bidirectional census loss},
  author    = {Meister, Simon and Hur, Junhwa and Roth, Stefan},
  booktitle = {Thirty-Second AAAI Conference on Artificial Intelligence},
  year      = {2018},
}

@article{dvs240,
  author   = {Brandli, Christian and Berner, Raphael and Yang, Minhao and Liu, Shih-Chii and Delbruck, Tobi},
  title    = {A 240$\times$180 130 {dB} 3 $\mu$s Latency Global Shutter Spatiotemporal Vision Sensor},
  journal  = {IEEE Journal of Solid-State Circuits},
  year     = {2014},
  volume   = {49},
  number   = {10},
  pages    = {2333--2341},
  month    = oct,
  doi      = {10.1109/JSSC.2014.2342715},
  issn     = {1558-173X},
  keywords = {CMOS image sensors;photodetectors;photodiodes;sensor arrays;gain 130 dB;time 3 mus;size 0.18 mum;gain 51 dB;latency global shutter spatiotemporal vision sensor;event-based dynamic vision sensor;asynchronously report log intensity change;robotics;real-time tracking;object recognition;object classification;DAVIS;dynamic and active pixel vision sensor;active pixel sensor circuit;APS circuit;DVS circuits;photodiode;sensor array;6M1P CMOS image sensor technology;CIS technology;Voltage control;Robot sensing systems;Photoreceptors;Photodiodes;Photoconductivity;Cameras;Universal Serial Bus;Active pixel sensor (APS);address event representation (AER);CMOS image sensor;dynamic and active pixel vision sensor (DAVIS);dynamic vision sensor (DVS);event-based;neuromorphic engineering;spike-based},
}

@article{qvga143,
  author  = {Posch, Christoph and Matolin, Daniel and Wohlgenannt, Rainer},
  title   = {A {QVGA} 143 {dB} dynamic range frame-free {PWM} image sensor with lossless pixel-level video compression and time-domain {CDS}},
  journal = {IEEE Journal of Solid-State Circuits},
  year    = {2011},
  month   = feb,
  volume  = {46},
  pages   = {259--275},
  doi     = {10.1109/JSSC.2010.2085952},
}

@article{lee2020enabling,
  author    = {Lee, Chankyu and Sarwar, Syed Shakib and Panda, Priyadarshini and Srinivasan, Gopalakrishnan and Roy, Kaushik},
  title     = {Enabling Spike-based Backpropagation for Training Deep Neural Network Architectures},
  journal   = {Frontiers in Neuroscience},
  year      = {2020},
  volume    = {14},
  pages     = {119},
  publisher = {Frontiers},
}

@article{lee2016training,
  title     = {Training deep spiking neural networks using backpropagation},
  author    = {Lee, Jun Haeng and Delbruck, Tobi and Pfeiffer, Michael},
  journal   = {Frontiers in Neuroscience},
  volume    = {10},
  pages     = {508},
  year      = {2016},
  publisher = {Frontiers},
}

@techreport{rumelhart1985learning,
  title       = {Learning internal representations by error propagation},
  author      = {Rumelhart, David E and Hinton, Geoffrey E and Williams, Ronald J},
  year        = {1985},
  institution = {Institute for Cognitive Science, University of California, San Diego},
}


@article{loihi2018,
  author   = {M. {Davies} and N. {Srinivasa} and T. {Lin} and G. {Chinya} and Y. {Cao} and S. H. {Choday} and G. {Dimou} and P. {Joshi} and N. {Imam} and S. {Jain} and Y. {Liao} and C. {Lin} and A. {Lines} and R. {Liu} and D. {Mathaikutty} and S. {McCoy} and A. {Paul} and J. {Tse} and G. {Venkataramanan} and Y. {Weng} and A. {Wild} and Y. {Yang} and H. {Wang}},
  title    = {Loihi: A Neuromorphic Manycore Processor with On-Chip Learning},
  journal  = {IEEE Micro},
  year     = {2018},
  volume   = {38},
  number   = {1},
  pages    = {82--99},
  month    = jan,
  doi      = {10.1109/MM.2018.112130359},
  issn     = {1937-4143},
  keywords = {circuit optimisation;integrated circuit modelling;learning (artificial intelligence);microprocessor chips;multiprocessing systems;neural chips;spike-based computation;CPU iso-process-voltage-area;magnitude superior energy-delay-product;LASSO optimization problems;locally competitive algorithm;hierarchical connectivity;dendritic compartments;synaptic delays;programmable synaptic learning rules;spiking neural networks;Intels process;on-chip learning;neuromorphic manycore processor;Loihi;size 14 nm;Neurons;Computer architecture;Computational modeling;Neuromorphics;Biological neural networks;Algorithm design and analysis;neuromorphic computing;machine learning;artificial intelligence},
}

@article{benosman1,
  title    = {Asynchronous frameless event-based optical flow},
  author   = {Ryad Benosman and Sio-Hoi Ieng and Charles Clercq and Chiara Bartolozzi and Mandyam Srinivasan},
  journal  = {Neural Networks},
  volume   = {27},
  pages    = {32--37},
  year     = {2012},
  issn     = {0893-6080},
  doi      = {10.1016/j.neunet.2011.11.001},
  url      = {http://www.sciencedirect.com/science/article/pii/S0893608011002930},
  keywords = {Asynchronous acquisition, Spikes, Temporal dynamics, Event-based vision, Frameless vision, Optical flow},
  abstract = {This paper introduces a process to compute optical flow using an asynchronous event-based retina at high speed and low computational load. A new generation of artificial vision sensors has now started to rely on biologically inspired designs for light acquisition. Biological retinas, and their artificial counterparts, are totally asynchronous and data driven and rely on a paradigm of light acquisition radically different from most of the currently used frame-grabber technologies. This paper introduces a framework for processing visual data using asynchronous event-based acquisition, providing a method for the evaluation of optical flow. The paper shows that current limitations of optical flow computation can be overcome by using event-based visual acquisition, where high data sparseness and high temporal resolution permit the computation of optical flow with micro-second accuracy and at very low computational cost.},
}

@article{benosman2,
  author   = {R. {Benosman} and C. {Clercq} and X. {Lagorce} and S. {Ieng} and C. {Bartolozzi}},
  title    = {Event-Based Visual Flow},
  journal  = {IEEE Transactions on Neural Networks and Learning Systems},
  year     = {2014},
  volume   = {25},
  number   = {2},
  pages    = {407--417},
  month    = feb,
  doi      = {10.1109/TNNLS.2013.2273537},
  issn     = {2162-2388},
  keywords = {eye;image sequences;event-based visual flow;precise timings;asynchronous event-based retina;biological retinas;frame-grabber technologies;local differential approach;precise visual flow orientation;Visualization;Sensors;Voltage control;Retina;Timing;Real-time systems;Cameras;Event-based vision;event-based visual motion flow;neuromorphic sensors;real time;Algorithms;Artificial Intelligence;Humans;Models, Neurological;Motion;Optic Flow;Retina;Visual Pathways},
}

@article{barranco2014,
  author   = {F. {Barranco} and C. {Ferm{\"u}ller} and Y. {Aloimonos}},
  title    = {Contour Motion Estimation for Asynchronous Event-Driven Cameras},
  journal  = {Proceedings of the IEEE},
  year     = {2014},
  volume   = {102},
  number   = {10},
  pages    = {1537--1556},
  month    = oct,
  doi      = {10.1109/JPROC.2014.2347207},
  issn     = {1558-2256},
  keywords = {computer vision;image sensors;image sequences;motion estimation;video signal processing;contour motion estimation;asynchronous event-driven cameras;image motion estimation;computer vision motion research;input frame-based video sequences;event-based image motion;local spatio-temporal information;dynamic vision sensor;DVS;intensity signals;multiresolution scheme;velocity information coupling;luminance information coupling;optic flow techniques;Computer vision;Image motion analysis;Voltage control;Optical imaging;Optical filters;Motion estimation;Cameras;Asynchronous event-based vision;motion contour;neuromorphic devices;real-time systems;Asynchronous event-based vision;motion contour;neuromorphic devices;real-time systems},
}

@inproceedings{aung2018,
  author    = {M. T. {Aung} and R. {Teo} and G. {Orchard}},
  title     = {Event-based Plane-fitting Optical Flow for Dynamic Vision Sensors in {FPGA}},
  booktitle = {2018 IEEE International Symposium on Circuits and Systems (ISCAS)},
  year      = {2018},
  pages     = {1--5},
  month     = may,
  doi       = {10.1109/ISCAS.2018.8351588},
  issn      = {2379-447X},
  keywords  = {field programmable gate arrays;image sensors;image sequences;pipeline processing;mobile agents;FPGA implementation;average absolute endpoint error;event-based plane-fitting optical flow estimation;full precision software implementation;dynamic vision sensors;Optical sensors;Field programmable gate arrays;Optical imaging;Adaptive optics;Image edge detection;Biomedical optical imaging},
}



@article{HordijkSC17,
  author        = {Bas J. Pijnacker Hordijk and Kirk Y. W. Scheper and Guido C. H. E. de Croon},
  title         = {Vertical Landing for Micro Air Vehicles using Event-Based Optical Flow},
  journal       = {CoRR},
  volume        = {abs/1702.00061},
  year          = {2017},
  url           = {http://arxiv.org/abs/1702.00061},
  archivePrefix = {arXiv},
  eprint        = {1702.00061},
  timestamp     = {Mon, 13 Aug 2018 16:46:56 +0200},
  biburl        = {https://dblp.org/rec/bib/journals/corr/HordijkSC17},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
}

@inproceedings{lucaskanade,
  author    = {Lucas, Bruce D. and Kanade, Takeo},
  title     = {An Iterative Image Registration Technique with an Application to Stereo Vision},
  booktitle = {Proceedings of the 7th International Joint Conference on Artificial Intelligence - Volume 2},
  series    = {IJCAI'81},
  year      = {1981},
  location  = {Vancouver, BC, Canada},
  pages     = {674--679},
  numpages  = {6},
  url       = {http://dl.acm.org/citation.cfm?id=1623264.1623280},
  acmid     = {1623280},
  publisher = {Morgan Kaufmann Publishers Inc.},
  address   = {San Francisco, CA, USA},
}


@book{dayan2001theoretical,
  title     = {Theoretical Neuroscience},
  author    = {Dayan, Peter and Abbott, Laurence F},
  volume    = {806},
  year      = {2001},
  publisher = {MIT Press},
  address   = {Cambridge, MA},
}


@article{brosch2015,
  author  = {Brosch, Tobias and Tschechne, Stephan and Neumann, Heiko},
  title   = {On event-based optical flow detection},
  journal = {Frontiers in Neuroscience},
  year    = {2015},
  month   = apr,
  volume  = {9},
  pages   = {137},
  doi     = {10.3389/fnins.2015.00137},
}

@inproceedings{tschechne2014,
  author    = {Tschechne, Stephan and Sailer, Roman and Neumann, Heiko},
  editor    = {El Gayar, Neamat and Schwenker, Friedhelm and Suen, Cheng},
  title     = {Bio-Inspired Optic Flow from Event-Based Neuromorphic Sensor Input},
  booktitle = {Artificial Neural Networks in Pattern Recognition},
  year      = {2014},
  publisher = {Springer International Publishing},
  address   = {Cham},
  pages     = {171--182},
  isbn      = {978-3-319-11656-3},
  abstract  = {Computational models of visual processing often use frame-based image acquisition techniques to process a temporally changing stimulus. This approach is unlike biological mechanisms that are spike-based and independent of individual frames. The neuromorphic Dynamic Vision Sensor (DVS) [Lichtsteiner et al., 2008] provides a stream of independent visual events that indicate local illumination changes, resembling spiking neurons at a retinal level. We introduce a new approach for the modelling of cortical mechanisms of motion detection along the dorsal pathway using this type of representation. Our model combines filters with spatio-temporal tunings also found in visual cortex to yield spatio-temporal and direction specificity. We probe our model with recordings of test stimuli, articulated motion and ego-motion. We show how our approach robustly estimates optic flow and also demonstrate how this output can be used for classification purposes.},
}

@inproceedings{barranco2015,
  author    = {Barranco, Francisco and Fermuller, Cornelia and Aloimonos, Yiannis},
  editor    = {Rojas, Ignacio and Joya, Gonzalo and Catala, Andreu},
  title     = {Bio-inspired Motion Estimation with Event-Driven Sensors},
  booktitle = {Advances in Computational Intelligence},
  year      = {2015},
  publisher = {Springer International Publishing},
  address   = {Cham},
  pages     = {309--321},
  isbn      = {978-3-319-19258-1},
  abstract  = {This paper presents a method for image motion estimation for event-based sensors. Accurate and fast image flow estimation still challenges Computer Vision. A new paradigm based on asynchronous event-based data provides an interesting alternative and has shown to provide good estimation at high contrast contours by estimating motion based on very accurate timing. However, these techniques still fail in regions of high-frequency texture. This work presents a simple method for locating those regions, and a novel phase-based method for event sensors that estimates more accurately these regions. Finally, we evaluate and compare our results with other state-of-the-art techniques.},
}

@inproceedings{milde2015,
  author    = {M. B. {Milde} and O. J. N. {Bertrand} and R. {Benosman} and M. {Egelhaaf} and E. {Chicca}},
  title     = {Bioinspired event-driven collision avoidance algorithm based on optic flow},
  booktitle = {2015 International Conference on Event-based Control, Communication, and Signal Processing (EBCCSP)},
  year      = {2015},
  pages     = {1--7},
  month     = jun,
  doi       = {10.1109/EBCCSP.2015.7300673},
  keywords  = {image sequences;mobile agents;robot vision;bioinspired event-driven collision avoidance algorithm;mobile agent;translational optic flow components;rotational optic flow components;gaze control;flight control;dynamic vision sensor;plane-fitting algorithm;spatio-temporal cuboid;collision avoidance direction;event-based depth-structure;Collision avoidance;Optical sensors;Optical imaging;Robots;Voltage control;Optical computing;Biomedical optical imaging},
}

@inproceedings{brosch2016,
  author    = {Brosch, Tobias and Neumann, Heiko},
  title     = {Event-based Optical Flow on Neuromorphic Hardware},
  booktitle = {Proceedings of the 9th EAI International Conference on Bio-inspired Information and Communications Technologies (Formerly BIONETICS)},
  series    = {BICT'15},
  year      = {2016},
  isbn      = {978-1-63190-100-3},
  location  = {New York City, United States},
  pages     = {551--558},
  numpages  = {8},
  doi       = {10.4108/eai.3-12-2015.2262447},
  acmid     = {2954727},
  publisher = {ICST (Institute for Computer Sciences, Social-Informatics and Telecommunications Engineering)},
  address   = {ICST, Brussels, Belgium},
  keywords  = {address-event representation, event-based sensing, neuromorphic computing, optical flow, real-time vision},
}

@article{zhu2018ev,
  title         = {{EV-FlowNet}: Self-supervised optical flow estimation for event-based cameras},
  author        = {Zhu, Alex Zihao and Yuan, Liangzhe and Chaney, Kenneth and Daniilidis, Kostas},
  journal       = {arXiv preprint arXiv:1802.06898},
  year          = {2018},
  archivePrefix = {arXiv},
  eprint        = {1802.06898},
}

@inproceedings{zhu2019unsupervised,
  author    = {Zhu, Alex Zihao and Yuan, Liangzhe and Chaney, Kenneth and Daniilidis, Kostas},
  title     = {Unsupervised event-based learning of optical flow, depth, and egomotion},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
  year      = {2019},
  pages     = {989--997},
}

@inproceedings{jason2016back,
  title        = {Back to basics: Unsupervised learning of optical flow via brightness constancy and motion smoothness},
  author       = {Yu, Jason J. and Harley, Adam W. and Derpanis, Konstantinos G.},
  booktitle    = {European Conference on Computer Vision},
  pages        = {3--10},
  year         = {2016},
  organization = {Springer},
  internal-note = {Same work as arXiv entry Yu2016; cite only one of the two},
}

@article{serrano2013128,
  title     = {A 128$\times$128 1.5\% Contrast Sensitivity 0.9\% {FPN} 3 $\mu$s Latency 4 {mW} Asynchronous Frame-Free Dynamic Vision Sensor Using Transimpedance Preamplifiers},
  author    = {Serrano-Gotarredona, Teresa and Linares-Barranco, Bernab{\'e}},
  journal   = {IEEE Journal of Solid-State Circuits},
  volume    = {48},
  number    = {3},
  pages     = {827--838},
  year      = {2013},
  publisher = {IEEE},
}

@article{zhu2018multivehicle,
  title     = {The multivehicle stereo event camera dataset: An event camera dataset for {3D} perception},
  author    = {Zhu, Alex Zihao and Thakur, Dinesh and {\"O}zaslan, Tolga and Pfrommer, Bernd and Kumar, Vijay and Daniilidis, Kostas},
  journal   = {IEEE Robotics and Automation Letters},
  volume    = {3},
  number    = {3},
  pages     = {2032--2039},
  year      = {2018},
  publisher = {IEEE},
}

@inproceedings{zhu2017,
  author    = {A. Z. {Zhu} and N. {Atanasov} and K. {Daniilidis}},
  title     = {Event-based feature tracking with probabilistic data association},
  booktitle = {2017 IEEE International Conference on Robotics and Automation (ICRA)},
  year      = {2017},
  pages     = {4465--4470},
  month     = may,
  doi       = {10.1109/ICRA.2017.7989517},
  keywords  = {image sequences;optimisation;probability;robot vision;sensor fusion;tracking;event-based feature tracking;probabilistic data association;asynchronous event-based sensors;basic robot vision problems;optical flow computation;optical flow quality;soft data association;intertwined EM scheme;Optical imaging;Optical sensors;Cameras;Spatiotemporal phenomena;Computational modeling;Integrated optics;Optical computing},
}

@article{gallego2017,
  author   = {G. {Gallego} and D. {Scaramuzza}},
  title    = {Accurate Angular Velocity Estimation With an Event Camera},
  journal  = {IEEE Robotics and Automation Letters},
  year     = {2017},
  volume   = {2},
  number   = {2},
  pages    = {632--639},
  month    = apr,
  doi      = {10.1109/LRA.2016.2647639},
  issn     = {2377-3774},
  keywords = {image processing;optimisation;angular velocity estimation;event camera;independent pixels;microsecond resolution;contrast maximization design;Cameras;Trajectory;Image edge detection;Angular velocity;Optical imaging;Estimation;Robot vision systems;Computer vision for other robotic applications;localization},
}

@article{mitrokhin2018,
  author        = {Anton Mitrokhin and Cornelia Ferm{\"{u}}ller and Chethan Parameshwara and Yiannis Aloimonos},
  title         = {Event-based Moving Object Detection and Tracking},
  journal       = {CoRR},
  volume        = {abs/1803.04523},
  year          = {2018},
  url           = {http://arxiv.org/abs/1803.04523},
  archivePrefix = {arXiv},
  eprint        = {1803.04523},
  timestamp     = {Mon, 13 Aug 2018 16:46:57 +0200},
  biburl        = {https://dblp.org/rec/bib/journals/corr/abs-1803-04523},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
}

@article{gallego2018,
  author        = {Guillermo Gallego and Henri Rebecq and Davide Scaramuzza},
  title         = {A Unifying Contrast Maximization Framework for Event Cameras, with Applications to Motion, Depth, and Optical Flow Estimation},
  journal       = {CoRR},
  volume        = {abs/1804.01306},
  year          = {2018},
  url           = {http://arxiv.org/abs/1804.01306},
  archivePrefix = {arXiv},
  eprint        = {1804.01306},
  timestamp     = {Mon, 13 Aug 2018 16:49:08 +0200},
  biburl        = {https://dblp.org/rec/bib/journals/corr/abs-1804-01306},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
}

@article{liu2018,
  author        = {Min Liu and Tobi Delbr{\"{u}}ck},
  title         = {{ABMOF:} {A} Novel Optical Flow Algorithm for Dynamic Vision Sensors},
  journal       = {CoRR},
  volume        = {abs/1805.03988},
  year          = {2018},
  url           = {http://arxiv.org/abs/1805.03988},
  archivePrefix = {arXiv},
  eprint        = {1805.03988},
  timestamp     = {Mon, 13 Aug 2018 16:48:37 +0200},
  biburl        = {https://dblp.org/rec/bib/journals/corr/abs-1805-03988},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
}

@article{flownet,
  author        = {Philipp Fischer and Alexey Dosovitskiy and Eddy Ilg and Philip H{\"{a}}usser and Caner Hazirbas and Vladimir Golkov and Patrick van der Smagt and Daniel Cremers and Thomas Brox},
  title         = {FlowNet: Learning Optical Flow with Convolutional Networks},
  journal       = {CoRR},
  volume        = {abs/1504.06852},
  year          = {2015},
  url           = {http://arxiv.org/abs/1504.06852},
  archivePrefix = {arXiv},
  eprint        = {1504.06852},
  timestamp     = {Mon, 13 Aug 2018 16:49:12 +0200},
  biburl        = {https://dblp.org/rec/bib/journals/corr/FischerDIHHGSCB15},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
}
@article{unet,
  author        = {Olaf Ronneberger and Philipp Fischer and Thomas Brox},
  title         = {U-Net: Convolutional Networks for Biomedical Image Segmentation},
  journal       = {CoRR},
  volume        = {abs/1505.04597},
  year          = {2015},
  url           = {http://arxiv.org/abs/1505.04597},
  archivePrefix = {arXiv},
  eprint        = {1505.04597},
  timestamp     = {Mon, 13 Aug 2018 16:46:52 +0200},
  biburl        = {https://dblp.org/rec/bib/journals/corr/RonnebergerFB15},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
}



@article{Yu2016,
  author        = {Jason J. Yu and Adam W. Harley and Konstantinos G. Derpanis},
  title         = {Back to Basics: Unsupervised Learning of Optical Flow via Brightness Constancy and Motion Smoothness},
  journal       = {CoRR},
  volume        = {abs/1608.05842},
  year          = {2016},
  url           = {http://arxiv.org/abs/1608.05842},
  archivePrefix = {arXiv},
  eprint        = {1608.05842},
  timestamp     = {Mon, 13 Aug 2018 16:48:51 +0200},
  biburl        = {https://dblp.org/rec/bib/journals/corr/YuHD16},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
  internal-note = {Same work as ECCV-workshop entry jason2016back; cite only one of the two},
}

@inproceedings{ren2017,
  author    = {Ren, Zhe and Yan, Junchi and Ni, Bingbing and Liu, Bin and Yang, Xiaokang and Zha, Hongyuan},
  title     = {Unsupervised Deep Learning for Optical Flow Estimation},
  booktitle = {Proceedings of the Thirty-First {AAAI} Conference on Artificial Intelligence},
  year      = {2017},
  month     = feb,
}


@incollection{lai2017,
  author    = {Lai, Wei-Sheng and Huang, Jia-Bin and Yang, Ming-Hsuan},
  title     = {Semi-Supervised Learning for Optical Flow with Generative Adversarial Networks},
  booktitle = {Advances in Neural Information Processing Systems 30},
  editor    = {I. Guyon and U. V. Luxburg and S. Bengio and H. Wallach and R. Fergus and S. Vishwanathan and R. Garnett},
  year      = {2017},
  pages     = {354--364},
  publisher = {Curran Associates, Inc.},
  url       = {http://papers.nips.cc/paper/6639-semi-supervised-learning-for-optical-flow-with-generative-adversarial-networks.pdf},
}

@article{giulioni2016,
  author   = {Giulioni, Massimiliano and Lagorce, Xavier and Galluppi, Francesco and Benosman, Ryad B.},
  title    = {Event-Based Computation of Motion Flow on a Neuromorphic Analog Neural Platform},
  journal  = {Frontiers in Neuroscience},
  volume   = {10},
  pages    = {35},
  year     = {2016},
  url      = {https://www.frontiersin.org/article/10.3389/fnins.2016.00035},
  doi      = {10.3389/fnins.2016.00035},
  issn     = {1662-453X},
  abstract = {Estimating the speed and direction of moving objects is a crucial component of agents behaving in a dynamic world. Biological organisms perform this task by means of the neural connections originating from their retinal ganglion cells. In artificial systems the optic flow is usually extracted by comparing activity of two or more frames captured with a vision sensor. Designing artificial motion flow detectors which are as fast, robust, and efficient as the ones found in biological systems is however a challenging task. Inspired by the architecture proposed by Barlow and Levick in 1965 to explain the spiking activity of the direction-selective ganglion cells in the rabbit's retina, we introduce an architecture for robust optical flow extraction with an analog neuromorphic multi-chip system. The task is performed by a feed-forward network of analog integrate-and-fire neurons whose inputs are provided by contrast-sensitive photoreceptors. Computation is supported by the precise time of spike emission, and the extraction of the optical flow is based on time lag in the activation of nearby retinal neurons. Mimicking ganglion cells our neuromorphic detectors encode the amplitude and the direction of the apparent visual motion in their output spiking pattern. Hereby we describe the architectural aspects, discuss its latency, scalability, and robustness properties and demonstrate that a network of mismatched delicate analog elements can reliably extract the optical flow from a simple visual scene. This work shows how precise time of spike emission used as a computational basis, biological inspiration, and neuromorphic systems can be used together for solving specific tasks.},
}


@misc{richter2014,
  author = {Richter, Christoph and R{\"o}hrbein, Florian and Conradt, J{\"o}rg},
  title  = {Bio-inspired optic flow detection using neuromorphic hardware},
  year   = {2014},
  month  = sep,
  doi    = {10.12751/nncn.bc2014.0032},
}

@inproceedings{Orchard2013,
  title     = {A spiking neural network architecture for visual motion estimation},
  author    = {Orchard, Garrick and Benosman, Ryad B. and Etienne-Cummings, Ralph and Thakor, Nitish V.},
  booktitle = {2013 IEEE Biomedical Circuits and Systems Conference (BioCAS)},
  year      = {2013},
  pages     = {298--301},
}

@inproceedings{horowitz20141,
  title        = {1.1 Computing's energy problem (and what we can do about it)},
  author       = {Horowitz, Mark},
  booktitle    = {2014 IEEE International Solid-State Circuits Conference Digest of Technical Papers (ISSCC)},
  pages        = {10--14},
  year         = {2014},
  organization = {IEEE},
}

@article{rueckauer2017conversion,
  title     = {Conversion of continuous-valued deep networks to efficient event-driven networks for image classification},
  author    = {Rueckauer, Bodo and Lungu, Iulia-Alexandra and Hu, Yuhuang and Pfeiffer, Michael and Liu, Shih-Chii},
  journal   = {Frontiers in Neuroscience},
  volume    = {11},
  pages     = {682},
  year      = {2017},
  publisher = {Frontiers},
}

@article{werbos1990backpropagation,
  author    = {Werbos, Paul J},
  title     = {Backpropagation through time: what it does and how to do it},
  journal   = {Proceedings of the IEEE},
  year      = {1990},
  volume    = {78},
  number    = {10},
  pages     = {1550--1560},
  publisher = {IEEE},
}

@article{kingma2014adam,
  title         = {{Adam}: A method for stochastic optimization},
  author        = {Kingma, Diederik P and Ba, Jimmy},
  journal       = {arXiv preprint arXiv:1412.6980},
  year          = {2014},
  archivePrefix = {arXiv},
  eprint        = {1412.6980},
}

@article{panda2019towards,
  author   = {Panda, Priyadarshini and Aketi, Sai Aparna and Roy, Kaushik},
  title    = {Toward Scalable, Efficient, and Accurate Deep Spiking Neural Networks With Backward Residual Connections, Stochastic Softmax, and Hybridization},
  journal  = {Frontiers in Neuroscience},
  volume   = {14},
  pages    = {653},
  year     = {2020},
  issn     = {1662-453X},
  abstract = {Spiking Neural Networks (SNNs) may offer an energy-efficient alternative for implementing deep learning applications. In recent years, there have been several proposals focused on supervised (conversion, spike-based gradient descent) and unsupervised (spike timing dependent plasticity) training methods to improve the accuracy of SNNs on large-scale tasks. However, each of these methods suffer from scalability, latency, and accuracy limitations. In this paper, we propose novel algorithmic techniques of modifying the SNN configuration with backward residual connections, stochastic softmax, and hybrid artificial-and-spiking neuronal activations to improve the learning ability of the training methodologies to yield competitive accuracy, while, yielding large efficiency gains over their artificial counterparts. Note, artificial counterparts refer to conventional deep learning/artificial neural networks. Our techniques apply to VGG/Residual architectures, and are compatible with all forms of training methodologies. Our analysis reveals that the proposed solutions yield near state-of-the-art accuracy with significant energy-efficiency and reduced parameter overhead translating to hardware improvements on complex visual recognition tasks, such as, CIFAR10, Imagenet datatsets.},
}

@article{diehl2015unsupervised,
  title         = {Unsupervised Learning of Digit Recognition Using Spike-Timing-Dependent Plasticity},
  author        = {Diehl, Peter U. and Cook, Matthew},
  journal       = {Frontiers in Computational Neuroscience},
  volume        = {9},
  pages         = {99},
  year          = {2015},
  doi           = {10.3389/fncom.2015.00099},
  publisher     = {Frontiers},
  internal-note = {Duplicate of entry stdpdiehlandcook2015 in this file; consolidate citations onto a single key.},
}
@misc{valles2018,
  author        = {Paredes-Vall{\'e}s, Federico and Scheper, Kirk Y. W. and de Croon, Guido C. H. E.},
  title         = {Unsupervised Learning of a Hierarchical Spiking Neural Network for Optical Flow Estimation: From Events to Global Motion Perception},
  year          = {2018},
  eprint        = {1807.10936},
  archiveprefix = {arXiv},
  url           = {https://arxiv.org/abs/1807.10936},
  internal-note = {Was journal={CoRR}; this is the arXiv preprint of paredes2019unsupervised (published in IEEE TPAMI) — prefer citing the published version.},
}


@article{stdpdiehlandcook2015,
  author        = {Diehl, Peter U. and Cook, Matthew},
  title         = {Unsupervised Learning of Digit Recognition Using Spike-Timing-Dependent Plasticity},
  journal       = {Frontiers in Computational Neuroscience},
  volume        = {9},
  pages         = {99},
  year          = {2015},
  doi           = {10.3389/fncom.2015.00099},
  issn          = {1662-5188},
  abstract      = {In order to understand how the mammalian neocortex is performing computations, two things are necessary; we need to have a good understanding of the available neuronal processing units and mechanisms, and we need to gain a better understanding of how those mechanisms are combined to build functioning systems. Therefore, in recent years there is an increasing interest in how spiking neural networks (SNN) can be used to perform complex computations or solve pattern recognition tasks. However, it remains a challenging task to design SNNs which use biologically plausible mechanisms (especially for learning new patterns), since most such SNN architectures rely on training in a rate-based network and subsequent conversion to a SNN. We present a SNN for digit recognition which is based on mechanisms with increased biological plausibility, i.e., conductance-based instead of current-based synapses, spike-timing-dependent plasticity with time-dependent weight change, lateral inhibition, and an adaptive spiking threshold. Unlike most other systems, we do not use a teaching signal and do not present any class labels to the network. Using this unsupervised learning scheme, our architecture achieves 95% accuracy on the MNIST benchmark, which is better than previous SNN implementations without supervision. The fact that we used no domain-specific knowledge points toward the general applicability of our network design. Also, the performance of our network scales well with the number of neurons used and shows similar performance for four different learning rules, indicating robustness of the full combination of mechanisms, which suggests applicability in heterogeneous biological neural networks.},
  internal-note = {Duplicate of entry diehl2015unsupervised in this file; consolidate citations onto a single key. URL dropped as redundant with the DOI.},
}

@article{flymotion2010,
  author   = {Borst, Alexander and Haag, Juergen and Reiff, Dierk F.},
  title    = {Fly Motion Vision},
  journal  = {Annual Review of Neuroscience},
  year     = {2010},
  volume   = {33},
  number   = {1},
  pages    = {49--70},
  doi      = {10.1146/annurev-neuro-060909-153155},
  note     = {PMID: 20225934},
  abstract = {Fly motion vision and resultant compensatory optomotor responses are a classic example for neural computation. Here we review our current understanding of processing of optic flow as generated by an animal's self-motion. Optic flow processing is accomplished in a series of steps: First, the time-varying photoreceptor signals are fed into a two-dimensional array of Reichardt-type elementary motion detectors (EMDs). EMDs compute, in parallel, local motion vectors at each sampling point in space. Second, the output signals of many EMDs are spatially integrated on the dendrites of large-field tangential cells in the lobula plate. In the third step, tangential cells form extensive interactions with each other, giving rise to their large and complex receptive fields. Thus, tangential cells can act as matched filters tuned to optic flow during particular flight maneuvers. They finally distribute their information onto postsynaptic descending neurons, which either instruct the motor centers of the thoracic ganglion for flight and locomotion control or act themselves as motor neurons that control neck muscles for head movements.},
  internal-note = {Dropped eprint and url fields that merely repeated the DOI resolver link; bare DOI kept.},
}
@article{honeybee1996,
  author    = {Srinivasan, M. and Zhang, S. and Lehrer, M. and Collett, T.},
  title     = {Honeybee Navigation En Route to the Goal: Visual Flight Control and Odometry},
  journal   = {Journal of Experimental Biology},
  volume    = {199},
  number    = {1},
  pages     = {237--244},
  year      = {1996},
  publisher = {The Company of Biologists Ltd},
  issn      = {0022-0949},
  url       = {https://jeb.biologists.org/content/199/1/237},
  abstract  = {Recent research has uncovered a number of different ways in which bees use cues derived from optic flow for navigational purposes. The distance flown to a food source is gauged by integrating the apparent motion of the visual world that is experienced en route. In other words, bees possess a visually driven `odometer' that is robust to variations in wind load and energy expenditure. Bees flying through a tunnel maintain equidistance to the flanking walls by balancing the apparent speeds of the images of the walls. This strategy enables them to negotiate narrow passages or to fly between obstacles. The speed of flight in a tunnel is controlled by holding constant the average image velocity as seen by the two eyes. This avoids potential collisions by ensuring that the bee slows down when flying through narrow passages. Bees landing on a horizontal surface hold constant the image velocity of the surface as they approach it. This automatically ensures that flight speed decreases with altitude and is close to zero at touchdown. The movement-sensitive mechanisms underlying these various behaviours seem to be different, qualitatively as well as quantitatively, from those mediating the well-investigated optomotor response.},
  internal-note = {Initials given periods; {\textquoteright}odometer{\textquoteright} garbling in abstract replaced with proper LaTeX quoting. Full author names not recoverable from this record — TODO confirm against the publisher page.},
}

@article{maas1997,
  author        = {Maass, Wolfgang},
  title         = {Networks of Spiking Neurons: The Third Generation of Neural Network Models},
  journal       = {Neural Networks},
  volume        = {10},
  number        = {9},
  pages         = {1659--1671},
  year          = {1997},
  issn          = {0893-6080},
  doi           = {10.1016/S0893-6080(97)00011-7},
  url           = {http://www.sciencedirect.com/science/article/pii/S0893608097000117},
  keywords      = {Spiking neuron, Integrate-and-fire neuron, Computational complexity, Sigmoidal neural nets, Lower bounds},
  abstract      = {The computational power of formal models for networks of spiking neurons is compared with that of other neural network models based on McCulloch Pitts neurons (i.e., threshold gates), respectively, sigmoidal gates. In particular it is shown that networks of spiking neurons are, with regard to the number of neurons that are needed, computationally more powerful than these other neural network models. A concrete biologically relevant function is exhibited which can be computed by a single spiking neuron (for biologically reasonable values of its parameters), but which requires hundreds of hidden units on a sigmoidal neural net. On the other hand, it is known that any function that can be computed by a small sigmoidal neural net can also be computed by a small network of spiking neurons. This article does not assume prior knowledge about spiking neurons, and it contains an extensive list of references to the currently available literature on computations in networks of spiking neurons and relevant results from neurobiology.},
  internal-note = {Duplicate of entry maass1997networks earlier in this file (note the misspelled key "maas"); consolidate citations onto a single key. DOI stripped of resolver prefix; "neutron" keyword typo fixed.},
}

@article{sun2014,
  author        = {Sun, Deqing and Roth, Stefan and Black, Michael J.},
  title         = {A Quantitative Analysis of Current Practices in Optical Flow Estimation and the Principles Behind Them},
  journal       = {International Journal of Computer Vision},
  volume        = {106},
  number        = {2},
  month         = jan,
  year          = {2014},
  pages         = {115--137},
  issn          = {0920-5691},
  doi           = {10.1007/s11263-013-0644-x},
  publisher     = {Kluwer Academic Publishers},
  address       = {Hingham, MA, USA},
  keywords      = {Median filtering, Motion boundary, Non-local term, Optical flow estimation, Practices},
  internal-note = {Journal name expanded from "Int. J. Comput. Vision"; ACM-export junk fields (acmid, issue_date, numpages) and url duplicating the DOI resolver removed.},
}

