@misc{AF2012,
  author       = {{U.S.A.F.}},
  title        = {Mitigating Sensor Saturation through Image Processing Techniques},
  year         = {2012},
  howpublished = {http://www.dodsbir.net/sitis/display\_topic.asp?Bookmark=44023},
  note         = {Accessed: 2012-12-20},
}

@article{Branchitta2008,
  author       = {Branchitta, F. and Diana, M. and Corsini, G. and Porta, A.},
  title        = {Dynamic-Range Compression and Contrast Enhancement in Infrared Imaging Systems},
  journaltitle = {Optical Engineering},
  volume       = {47},
  number       = {7},
  year         = {2008},
  pages        = {47--54},
}
@inproceedings{Burt1993,
  author    = {Burt, P. J. and Kolczynski, R. J.},
  title     = {Enhanced Image Resolution through Fusion},
  booktitle = {Fourth International Conference on Computer Vision},
  year      = {1993},
  pages     = {173--182},
  abstract  = {The authors present an extension to the pyramid approach to image fusion. The modifications address problems that were encountered with past implementations of pyramid-based fusion. In particular, the modifications provide greater shift invariance and immunity to video noise, and provide at least a partial solution to the problem of combining components that have roughly equal salience but opposite contrasts. The fusion algorithm was found to perform well for a range of tasks without requiring adjustment of the algorithm parameters. Results were remarkably insensitive to changes in these parameters, suggesting that the procedure is both robust and generic. A composite imaging technique is outlined that may provide a powerful tool for image capture. By fusing a set of images obtained under restricted, narrowband, imaging conditions, it is often possible to construct an image that has enhanced information content when compared to a single image obtained directly with a broadband sensor.},
}

@misc{CambridgeInColour2013,
  author       = {{Cambridge In Colour}},
  title        = {Cameras Vs The Human Eye},
  year         = {2013},
  howpublished = {http://www.cambridgeincolour.com/tutorials/cameras-vs-human-eye.htm},
  abstract     = {Dynamic range* is one area where the eye is often seen as having a huge advantage. If we were to consider situations where our pupil opens and closes for different brightness regions, then yes, our eyes far surpass the capabilities of a single camera image (and can have a range exceeding 24 f-stops). However, in such situations our eye is dynamically adjusting like a video camera, so this arguably isn't a fair comparison.

If we were to instead consider our eye's instantaneous dynamic range (where our pupil opening is unchanged), then cameras fare much better. This would be similar to looking at one region within a scene, letting our eyes adjust, and not looking anywhere else. In that case, most estimate that our eyes can see anywhere from 10-14 f-stops of dynamic range, which definitely surpasses most compact cameras (5-7 stops), but is surprisingly similar to that of digital SLR cameras (8-11 stops).

On the other hand, our eye's dynamic range also depends on brightness and subject contrast, so the above only applies to typical daylight conditions. With low-light star viewing our eyes can approach an even higher instantaneous dynamic range, for example.},
}

@misc{Camera2011,
  author       = {{ReportLinker}},
  title        = {Camera Industry: Market Research Reports, Statistics and Analysis},
  year         = {2012},
  howpublished = {http://www.reportlinker.com/ci02061/Camera.html},
  note         = {Accessed: 2012-12-29},
}

@misc{Clark2009,
  author       = {Clark, R. N.},
  title        = {Notes on the Resolution and Other Details of the Human Eye},
  year         = {2009},
  howpublished = {http://www.clarkvision.com/articles/eye-resolution.html},
  note         = {Accessed: 2009-11-25},
  abstract     = {The Human eye is able to function in bright sunlight and view faint starlight, a range of more than 10 million to one. But this is like saying a camera can function over a similar range by adjusting the ISO speed, aperture and exposure time.

In any one view, the eye can see over a 10,000 range in contrast detection, but it depends on the scene brightness, with the range decreasing with lower contrast targets. The eye is a contrast detector, not an absolute detector like the sensor in a digital camera, thus the distinction. (See Figure 2.6 in Clark, 1990; Blackwell, 1946, and references therein). The range of the human eye is greater than any film or consumer digital camera.

Here is a simple experiment you can do. Go out with a star chart on a clear night with a full moon. Wait a few minutes for your eyes to adjust. Now find the faintest stars you can detect when you can see the full moon in your field of view. Try and limit the moon and stars to within about 45 degrees of straight up (the zenith). If you have clear skies away from city lights, you will probably be able to see magnitude 3 stars. The full moon has a stellar magnitude of -12.5. If you can see magnitude 2.5 stars, the magnitude range you are seeing is 15. Every 5 magnitudes is a factor of 100, so 15 is 100 * 100 * 100 = 1,000,000. Thus, the dynamic range in this relatively low light condition is about 1 million to one, perhaps higher!},
}

@inproceedings{Debevec1997,
  author    = {Debevec, P. E. and Malik, J.},
  title     = {Recovering High Dynamic Range Radiance Maps from Photographs},
  booktitle = {ACM SIGGRAPH 1997 Conference Proceedings},
  year      = {1997},
  pages     = {369--378},
  abstract  = {We present a method of recovering high dynamic range radiance maps from photographs taken with conventional imaging equipment. In our method, multiple photographs of the scene are taken with different amounts of exposure. Our algorithm uses these differently exposed photographs to recover the response function of the imaging process, up to factor of scale, using the assumption of reciprocity. With the known response function, the algorithm can fuse the multiple photographs into a single, high dynamic range radiance map whose pixel values are proportional to the true radiance values in the scene. We demonstrate our method on images acquired with both photochemical and digital imaging processes. We discuss how this work is applicable in many areas of computer graphics involving digitized photographs, including image-based modeling, image compositing, and image processing. Lastly, we demonstrate a few applications of having high dynamic range radiance maps, such as synthesizing realistic motion blur and simulating the response of the human visual system.},
}

@inproceedings{Eastwood2012,
  author    = {Eastwood, Brian and Childs, Elisabeth},
  title     = {Image Alignment for Multiple Camera High Dynamic Range Microscopy},
  booktitle = {IEEE Workshop on Applications of Computer Vision (WACV)},
  year      = {2012},
  pages     = {225--232},
  abstract  = {This paper investigates the problem of image alignment for multiple camera high dynamic range (HDR) imaging. HDR imaging combines information from images taken with different exposure settings. Combining information from multiple cameras requires an alignment process that is robust to the intensity differences in the images. HDR applications that use a limited number of component images require an alignment technique that is robust to large exposure differences. We evaluate the suitability for HDR alignment of three exposure-robust techniques. We conclude that image alignment based on matching feature descriptors extracted from radiant power images from calibrated cameras yields the most accurate and robust solution. We demonstrate the use of this alignment technique in a high dynamic range video microscope that enables live specimen imaging with a greater level of detail than can be captured with a single camera.},
}

@inproceedings{Eden2006,
  author    = {Eden, A. and Uyttendaele, M. and Szeliski, R.},
  title     = {Seamless Stitching of Scenes with Large Motions and Exposure Differences},
  booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
  year      = {2006},
  pages     = {2498--2505},
}

@book{Freeman2008,
  author    = {Freeman, M.},
  title     = {Mastering {HDR} Photography},
  publisher = {Amphoto Books},
  year      = {2008},
}

@misc{Getty2002,
  author       = {{J. Paul Getty Museum}},
  title        = {{Gustave Le Gray}, Photographer},
  year         = {2002},
  howpublished = {http://www.getty.edu/art/exhibitions/le\_gray/},
  note         = {Accessed: 2002-07-29},
  abstract     = {Because of the limitations of photographic materials at the time, it was extremely difficult to capture both sea and sky in a single image. By combining two negatives---one for the foreground water and another for the clouds---Le Gray triumphed. This use of combination printing was not criticized when he exhibited his seascapes, although it is unlikely that he hung finished images utilizing the same negative for the sky in each adjacent to one other, so perhaps it went undetected. More important was the harmonious union produced by his innovative technique, which he originally invented for landscapes with overexposed skies.},
}


@book{Gulbins2009,
  author    = {Gulbins, J. and Gulbins, R.},
  title     = {Photographic Multishot Techniques: High Dynamic Range, Super Resolution, Extended Depth of Field, Stitching},
  publisher = {Rocky Nook},
  year      = {2009},
}

@inproceedings{Hasinoff2010,
  author    = {Hasinoff, S. W. and Durand, F. and Freeman, W. T.},
  title     = {Noise-Optimal Capture for High Dynamic Range Photography},
  booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
  year      = {2010},
}

@inproceedings{Mann1995,
  author    = {Mann, S. and Picard, R. W.},
  title     = {On Being `Undigital' with Digital Cameras: Extending Dynamic Range by Combining Differently Exposed Pictures},
  booktitle = {First IEEE Conference on Image Processing},
  year      = {1995},
  pages     = {363--367},
  abstract  = {Most everyday scenes have a far greater dynamic range than can be recorded on a photographic film or electronic imaging apparatus (whether it be a digital still camera, video, etc.). However, a set of pictures, that are identical except for their exposure, collectively show us much more dynamic range than any single picture. The dark pictures show us highlight details of the scene that would be washed out in a ``properly exposed'' picture, while the light pictures show us some shadow detail that would also not appear in a ``properly exposed'' picture. We propose a means of combining differently exposed pictures to obtain a single picture of extended dynamic range, and improved color fidelity. Given a set of digital pictures, we may produce a single picture which is, for all practical purposes, `undigital', in the sense that it is a floating point image, with the kind of dynamic range we are accustomed to seeing in typical floating point representations, as opposed to the integer images from which it was generated. The method is completely automatic; it requires no human intervention, and it requires no knowledge of the response function of the imaging device. It works reliably with images from a digital camera of unknown response, or from a scanner with unknown response, scanning an unknown film type.},
}

@inproceedings{Naseem1999,
  author    = {Aziz, Naseem Y. and Cannata, Robert F. and Kincaid, Glenn T. and Hansen, Randal J. and Heath, Jeffry L. and Parish, William J. and Petronio, Susan M. and Woolaway, James T.},
  title     = {Standardized High-Performance 640x512 Readout Integrated Circuit for Infrared Applications},
  booktitle = {Infrared Technology and Applications XXV},
  series    = {SPIE Proceedings},
  volume    = {3698},
  year      = {1999},
  pages     = {766--777},
}

@inproceedings{Mitsunga1999,
  author    = {Mitsunaga, T. and Nayar, S. K.},
  title     = {Radiometric Self Calibration},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR'99)},
  year      = {1999},
  pages     = {374--380},
  abstract  = {A simple algorithm is described that computes the radiometric response function of an imaging system, from images of an arbitrary scene taken using different exposures. The exposure is varied by changing either the aperture setting or the shutter speed. The algorithm does not require precise estimates of the exposures used. Rough estimates of the ratios of the exposures (e.g. F-number settings on an inexpensive lens) are sufficient for accurate recovery of the response function as well as the actual exposure ratios. The computed response function is used to fuse the multiple images into a single high dynamic range radiance image. Robustness is tested using a variety of scenes and cameras as well as noisy synthetic images generated using 100 randomly selected response curves. Automatic rejection of image areas that have large vignetting effects or temporal scene variations make the algorithm applicable to not just photographic but also video cameras. Code for the algorithm and several results are publicly available at http://www.cs.columbia.edu/CAVE/.},
}

@inproceedings{Pal2004,
  author    = {Pal, C. and Szeliski, R. and Uyttendaele, M. and Jojic, N.},
  title     = {Probability Models for High Dynamic Range Imaging},
  booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR'04)},
  year      = {2004},
  pages     = {173--180},
  abstract  = {Methods for expanding the dynamic range of digital photographs by combining images taken at different exposures have recently received a lot of attention. Current techniques assume that the photometric transfer function of a given camera is the same (modulo an overall exposure change) for all the input images. Unfortunately, this is rarely the case with today's camera, which may perform complex non-linear color and intensity transforms on each picture. In this paper, we show how the use of probability models for the imaging system and weak prior models for the response functions enable us to estimate a different function for each image using only pixel intensity values. Our approach also allows us to characterize the uncertainty inherent in each pixel measurement. We can therefore produce statistically optimal estimates for the hidden variables in our model representing scene irradiance. We present results using this method to statistically characterize camera imaging functions and construct high-quality high dynamic range (HDR) images using only image pixel information.},
}

@book{Reinhard2005,
  author    = {Reinhard, E. and Ward, G. and Pattanaik, S. and Debevec, P.},
  title     = {High Dynamic Range Imaging: Acquisition, Display, and Image-Based Lighting},
  publisher = {Morgan Kaufmann},
  year      = {2005},
}


@article{Seetzen2004,
  author   = {Seetzen, Helge and Heidrich, Wolfgang and Stuerzlinger, Wolfgang and Ward, Greg},
  title    = {High Dynamic Range Display Systems},
  journal  = {ACM Transactions on Graphics},
  volume   = {23},
  number   = {3},
  year     = {2004},
  pages    = {760--768},
  abstract = {The dynamic range of many real-world environments exceeds the capabilities of current display technology by several orders of magnitude. In this paper we discuss the design of two different display systems that are capable of displaying images with a dynamic range much more similar to that encountered in the real world. The first display system is based on a combination of an LCD panel and a DLP projector, and can be built from off-the-shelf components. While this design is feasible in a lab setting, the second display system, which relies on a custom-built LED panel instead of the projector, is more suitable for usual office workspaces and commercial applications. We describe the design of both systems as well as the software issues that arise. We also discuss the advantages and disadvantages of the two designs and potential applications for both systems.},
}

@book{Szeliski2011,
  author    = {Szeliski, Richard},
  title     = {Computer Vision: Algorithms and Applications},
  publisher = {Springer-Verlag London Limited},
  location  = {New York},
  year      = {2011},
}


