/*
    Copyright 2006-2012 Patrik Jonsson, sunrise@familjenjonsson.org

    This file is part of Sunrise.

    Sunrise is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    Sunrise is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Sunrise.  If not, see <http://www.gnu.org/licenses/>.

*/

/// \file
/// Classes for storing the emerging rays information.

#ifndef __emergence__
#define __emergence__

#include "mcrx-types.h"
#include "mcrx-units.h"
#include <vector>
#include "blitz/array.h"

#include "blitz/range.h"
#include "ray.h"
#include "constants.h"
#include "random.h"
#include "projection.h"
#include <cmath>
#include "boost/thread/mutex.hpp"
#include <string>
#include <set>
#include "mcrx-debug.h"
#include "emission.h"
#include "chromatic_policy.h"

using std::cout;
using std::endl;

namespace mcrx {
  template <typename> class emergence;
  class camera_base;
  template <typename> class camera;
  template <typename T_pixel> class camera<blitz::Array<T_pixel, 1> >; 
  template<typename> class camera_angular_distribution;
  template<typename T_pixel> class camera_angular_distribution<blitz::Array<T_pixel,1> >;
}

namespace CCfits {
  class ExtHDU;
  class HDU;
  class FITS;
}

/** Angular_distribution class for the cameras. This has the meaning
    of the probability of sampling a certain *outgoing* direction (based
    on uniform sampling of the normalized image plane). This is
    dP/dOmega = dP/dA * dA/dOmega. dP/dA is constant=1, so we just return
    1/area_scale. It is implemented as a thin wrapper on a reference
    to the camera. Don't delete the camera and think this makes
    sense... */
template <typename T_lambda> 
class mcrx::camera_angular_distribution :
  public angular_distribution<T_lambda>
{
 private:
  /// Non-owning reference to the wrapped camera. The camera must
  /// outlive this object (see the class comment above).
  const camera<T_lambda>& c_;

 public:
  /// Constructor stores a reference to the camera; nothing is copied.
  camera_angular_distribution (const camera<T_lambda>& c): 
    angular_distribution<T_lambda>(), c_(c) {};

  /** Returns the distribution function. We define this just like the
      emitters, i.e., the distribution is dependent on the outgoing
      direction (first argument). This requires special handling in
      the code when evaluating the paths and d is the incoming
      direction. */
  T_lambda distribution_function (const vec3d& d, const vec3d&) const {
    T_lambda f; distribution_function(d,vec3d(),f);
    return f; };
  /** This function assigns the distribution function in place. The
      value is 1/area_scale for the reversed direction; if the
      direction falls outside the image (area_scale returns zero, so
      1/area_scale is infinite) the distribution is set to zero. */
  virtual void distribution_function(const vec3d& d,
				     const vec3d&,
				     T_lambda& distr) const {
    const T_float f=1./c_.area_scale(vec3d(-d)); 
    distr =  f<blitz::huge(T_float()) ? f : 0; };
  /// Polymorphic copy. The clone refers to the same camera object.
  boost::shared_ptr<angular_distribution<T_lambda> > clone() const {
    return boost::shared_ptr<angular_distribution<T_lambda> >
      (new camera_angular_distribution<T_lambda>(*this));};
};


template <typename T> 
class mcrx::camera_angular_distribution<blitz::Array<T,1> >: 
  public angular_distribution<blitz::Array<T,1> >
{
private:
  /// Pixel content type: a 1-D blitz array (one value per wavelength).
  typedef blitz::Array<T,1> T_lambda;

  /// Non-owning reference to the wrapped camera; must outlive this object.
  const camera<T_lambda>& c_;

public:
  /// Constructor stores a reference to the camera; nothing is copied.
  camera_angular_distribution (const camera<T_lambda>& c): 
#ifdef MCRX_QUALIFY_BASE_CONSTRUCTORS
    angular_distribution<T_lambda>::
#endif
    angular_distribution<T_lambda> (), c_(c) {};

  /** Returns the distribution function. We define this just like the
      emitters, i.e., the distribution is dependent on the outgoing
      direction (first argument). This requires special handling in
      the code when evaluating the paths and d is the incoming
      direction. The returned array is sized to the camera image's
      wavelength dimension. */
  T_lambda distribution_function (const vec3d& d, const vec3d&) const {
    T_lambda distr(c_.get_image().extent(blitz::thirdDim)); 
    distribution_function(d,vec3d(),distr); return distr; };
  /** This function assigns the distribution function in place. The
      scalar value 1/area_scale (or zero if the direction is outside
      the image) is broadcast over all wavelength elements. */
  virtual void distribution_function(const vec3d& d,
				     const vec3d&,
				     T_lambda& distr) const {
    const T_float f=1./c_.area_scale(vec3d(-d)); 
    distr = (f<blitz::huge(T_float()) ? f : 0); };
  /// Polymorphic copy. The clone refers to the same camera object.
  boost::shared_ptr<angular_distribution<blitz::Array<T,1> > > clone() const {
    return boost::shared_ptr<angular_distribution<blitz::Array<T,1> > >
      (new camera_angular_distribution<blitz::Array<T,1> >(*this));};
};


/** Class keeping a set of cameras registering emerging rays. The
    emergence class keeps a vector of cameras (T_camera) and knows how
    to set up, save and load these cameras. */
template <typename image_type>
class mcrx::emergence {
public:
  typedef mcrx::T_float T_float;
  typedef image_type T_image;
  typedef ray<T_image> T_ray;
  typedef camera<T_image> T_camera;
  typedef typename std::vector<T_camera*>::iterator iterator;
  typedef typename std::vector<T_camera*>::const_iterator const_iterator;
  
private:
  /// The per-image quantities (e.g. images). The cameras are owned by
  /// this object and deleted in the destructor.
  std::vector<T_camera*> cameras;

public:
  /// Default constructor creates an empty emergence without cameras.
  emergence() {};

  // Constructor that takes ntheta, nphi.
  emergence(int nt, int np, T_float cd, T_float linear_fov,
	    unsigned int ysize, bool exclude_south_pole = false,
	    const vec3d& translate_origin=vec3d(0,0,0),
	    const std::string& type=rectilinear_projection::typestring_);
  // Constructor taking a list of explicit camera positions
  // as (theta, phi) pairs in radians.
  emergence (const std::vector<std::pair<T_float, T_float> >& cam_pos,
	     T_float cd, T_float linear_fov, unsigned int ysize,
	     const vec3d& translate_origin=vec3d(0,0,0),
	     const std::string& type=rectilinear_projection::typestring_);

  // Constructor taking an even more explicit list of camera positions
  // (position, direction, up vector and field of view per camera).
  emergence (const std::vector<vec3d>& cam_pos, 
	     const std::vector<vec3d>& cam_dir,
	     const std::vector<vec3d>& cam_up,
	     const std::vector<T_float>& fov, unsigned int ysize,
	     const vec3d& translate_origin=vec3d(0,0,0),
	     const std::string& type=rectilinear_projection::typestring_);
  // Constructor taking a vector of pre-made cameras (copied).
  emergence (const std::vector<const T_camera*>&);
  // Constructs an emergence object from a previously saved FITS file
  emergence (const CCfits::FITS&);
  // Copy constructor clones the camera setup but not the image data.
  emergence (const emergence&);
  
  virtual ~emergence ();

  // Returns camera based on its number. i must be in [0, n_cameras()).
  const T_camera& get_camera (int i) const {
    assert(i>=0 && i<cameras.size());
    return *cameras[i];
  }; 

  T_camera& get_camera (int i) {
    assert(i>=0 && i<cameras.size());
    return *cameras[i];
  }; 

  int n_cameras () const {return cameras.size();};

  // Users have to be able to access the cameras now
  // (these don't have anything to do with the camera number)
  iterator begin () {return cameras.begin();};
  iterator end () {return cameras.end();};
  const_iterator begin () const {return cameras.begin();};
  const_iterator end () const {return cameras.end();};

  /// Coadds the image data of the cameras in rhs into this object's
  /// cameras, matched by camera number.
  emergence& operator+= (const emergence& rhs);
  /// Enables/disables mutex locking in all cameras.
  void use_lock (bool b);
  
  void write_parameters (CCfits::FITS&, const T_unit_map& units) const;

};



/** Base class for the cameras, keeping projection information that is
    independent of the data type in the pixels. The concrete camera
    classes inherit this. */
class mcrx::camera_base {
public:
  /// Projection object mapping directions to normalized image coordinates.
  boost::shared_ptr<projection> proj_;

  /// camera image size (in pixels).
  const vec2i axis_;

  // projection variables (these are only left over because they are
  // written to the hdu, they don't really affect the projection)
  const T_float cameradist; ///< Distance from origin to camera aperture.
  const T_float theta; ///< Angular coordinate theta of camera position.
  const T_float phi;   ///< Angular coordinate phi of camera position.
  T_float intensity; ///< Total intensity in camera.
  const vec3d dir_; ///< Camera viewing direction.
  const vec3d pos_;  ///< Camera position.
  const vec3d ny_;  ///< y base vector in projection plane.
  const vec3d nx_;  ///< x base vector in projection plane.
  
  boost::mutex pixel_mutex; ///< Mutex for image array access.
  bool use_mutex; ///< If true, image additions lock pixel_mutex.

  camera_base (T_float cd, T_float T, T_float p,
	       int ysize, const vec3d& translate_origin,
	       boost::shared_ptr<projection>& proj);
  camera_base (const vec3d& pos, const vec3d& dir, const vec3d& up,
	       int ysize, boost::shared_ptr<projection>& proj);
	       
  /** Copy constructor. pixel_mutex is not copyable and is
      default-constructed. The initializer list is written in member
      declaration order -- the order members are actually initialized
      in -- fixing the previous pos_/ny_/nx_ ordering mismatch that
      triggered -Wreorder. */
  camera_base (const camera_base& rhs) : 
    proj_(rhs.proj_),
    axis_ (rhs.axis_), cameradist (rhs.cameradist),
    theta (rhs.theta), phi (rhs.phi), intensity (rhs.intensity),
    dir_ (rhs.dir_), pos_ (rhs.pos_), ny_ (rhs.ny_), nx_ (rhs.nx_),
    use_mutex (rhs.use_mutex) {};
  virtual ~camera_base() {};

  /// Returns the projection type string (e.g. the rectilinear type).
  const std::string& type () const {return proj_->type();};
  /* Returns the area of one pixel in the normalized image plane. This
     is just 1/number_of_pixels, regardless of the projection. The
     projection appears in the area_scale, since for
     non-area-preserving projections it depends on the image
     position. */
  T_float pixel_normalized_area () const { return 1.0/product(axis_);} ;
  /// Total solid angle covered by the camera, from the projection.
  T_float solid_angle () const {
    return proj_->solid_angle();};
  int xsize () const {return axis_[0];};
  int ysize () const {return axis_[1];};
  void write_history (CCfits::HDU & output) const;
  T_float get_theta () const {return theta;};
  T_float get_phi() const {return phi;};
  vec3d position() const { return pos_; };
  vec3d direction() const { return dir_; };
  /** Returns camera up direction. Note that this is not necessarily
      the up vector that was supplied, this is the real vector in the
      projection plane perpendicular to direction. */
  vec3d up() const { return ny_; };

  /** Projects a position onto the image by transforming the point
      into the image frame, calling the projection object and then
      converting to a pixel position. The pixel positions are such
      that 0<=px<1 is in pixel 0, 1<=px<2 in pixel 1, etc. Returns
      (-1,-1) if the point does not project onto the image. */
  blitz::TinyVector<int, 2> project (const vec3d& point) const {
    const vec3d dir1 = point - pos_;
    const vec3d dir = rotate(dir1, dir_, nx_, true);

    const blitz::TinyVector<T_float, 2> ix = proj_->project(dir);
    // ix==ix is false only for NaN, which the projection uses to
    // signal that the direction misses the image.
    if(ix[0]==ix[0]) {
      assert(ix[0]>=0);
      assert(ix[0]<1);
      assert(ix[1]>=0);
      assert(ix[1]<1);
      return blitz::TinyVector<int, 2> (int(floor(ix[0]*axis_[0])),
					int(floor(ix[1]*axis_[1])));
    }
    else
      return blitz::TinyVector<int, 2> (-1,-1);
  };

  /** Deprojects an image position into an (outward) direction with
      the projection object, and then rotates into the frame of the
      camera. The image position is in floating-point pixels, using
      the convention as in project, i.e., 0<=px<1 is in pixel 0,
      1<=px<2 in pixel 1, and the center of pixel i is i.5.  */
  vec3d deproject (const blitz::TinyVector<T_float, 2>& px) const {
    ASSERT_ALL(px>=0);
    ASSERT_ALL(px<axis_);

    // convert to normalized coordinates and deproject
    const vec3d dir1 = proj_->deproject(px/axis_);

    const vec3d d = rotate(dir1, dir_, nx_, false);
    return d;
  };
  /** Return the area scale dOmega/dA (solid angle per area on the
      normalized image plane) for the given *incoming* direction. If the
      direction is outside the image, the value is zero. */
  T_float area_scale(const vec3d& d) const {
    const vec3d dir = rotate(d, dir_, nx_, true);

    // we need -dir to convert the incoming direction to the
    // equivalent position, which is what project uses
    const blitz::TinyVector<T_float, 2> ix = proj_->project(-dir);
    const T_float as = proj_->area_scale(ix);
    return as;
  }
  /** Return the area scale dOmega/dA (solid angle per area on the
      normalized image plane) for the given position on the normalized
      image plane. */
  // NOTE(review): this overload takes TinyVector<float,2> while the
  // rest of the class uses T_float -- confirm this is intentional.
  T_float area_scale(const blitz::TinyVector<float, 2>& ix) const {
    return proj_->area_scale(ix); };

  /** Returns an image containing the solid angles subtended by the
      pixels in the camera. */
  array_2 pixel_solid_angle_image() const;

  /// Enables/disables mutex protection of image additions.
  void use_lock (bool b) {use_mutex = b;};
  
  // FITS routines
  void write_parameters (CCfits::ExtHDU& hdu,
                         const T_unit_map& units) const;
  void write_wcs (CCfits::ExtHDU& hdu, const T_unit_map& units) const;
};


/** Camera class for monochromatic runs, where the pixel data type is
    of type T_pixel. */
template <typename T_pixel>
class mcrx::camera: public camera_base {
  friend class mcrx::emergence<T_pixel>;
public:
  typedef blitz::Array<T_pixel, 2> T_image;
  typedef ray<T_pixel> T_ray;
  typedef typename emission<monochromatic_policy, local_random>::T_biaser T_biaser;

private:
  /** Adds the value to a pixel. For monochromatic images, the doppler
      is a no-op and is just ignored. */
  void add (const blitz::TinyVector<int, 2>& pix, 
	    const T_pixel& value, 
	    T_float doppler) {
    DEBUG(3,cout << "Adding camera max " << max(value) << endl;);
    if (pix [0] >= 0){
      // -1 is used to indicate a miss
      if (use_mutex) {
	// lock the shared image while adding, to avoid races
	boost::mutex::scoped_lock pixel_lock (pixel_mutex);
	pixels (pix) += value;
      }else
	pixels (pix) += value;
    }
  };

  /// The image array.
  blitz::Array<T_pixel, 2> pixels;
  /// Angular distribution wrapping this camera (handed out by distribution()).
  boost::shared_ptr<angular_distribution<T_pixel> > distr_;

public:
  /// Constructor for a camera at spherical position (cd, T, p).
  camera (T_float cd, T_float T, T_float p,
	  int ysize, const vec3d& translate_origin,
	  boost::shared_ptr<projection>& proj) :
    camera_base (cd, T, p, ysize, translate_origin, proj),
    pixels (axis_), distr_(new camera_angular_distribution<T_pixel>(*this)) {
    // initialize pixels
    pixels = 0.;};
  /// Constructor for an explicitly positioned camera.
  camera (const vec3d& pos, const vec3d& dir, const vec3d& up,
	  int ysize,
	  boost::shared_ptr<projection>& proj) :
    camera_base (pos, dir, up, ysize, proj),
    pixels (axis_), distr_(new camera_angular_distribution<T_pixel>(*this)) {
    pixels = 0.;};
  /// Copy constructor. Does NOT copy the image arrays.
  camera (const camera& rhs): 
    camera_base (rhs), pixels (axis_), 
    distr_(new camera_angular_distribution<T_pixel>(*this)) {
    pixels = 0.;};
    
  /** Adds the content*intensity to the image. The position pos is projected
      onto the image plane to determine which pixel is added to. For
      monochromatic images, the doppler parameter is a no-op. */
  void add (const vec3d& pos, T_float intensity, const T_pixel& content, 
	    T_float doppler=1.0) {
    add (project (pos), intensity*content, doppler);};

  /** Adds the content to the image. The position pos is projected
      onto the image plane to determine which pixel is added to. For
      monochromatic images, the doppler parameter is a no-op. */
  void add (const vec3d& pos, const T_pixel& content, T_float doppler=1.0 ) {
    add (project (pos), content, doppler);};

  /** Camera increment operator is used when coadding multiple cameras
      used in parallel runs. */
  camera& operator+= (const camera& rhs) {
    // if rhs is not initialized, it can't have any info. skip it
    if(rhs.pixels.size()>0) {
      if (use_mutex) {
	boost::mutex::scoped_lock pixel_lock (pixel_mutex);      
	pixels+= rhs.pixels; 
      }else
	pixels+= rhs.pixels; 
    }
    return *this;
  };
 
  /// Return camera array.
  const blitz::Array<T_pixel, 2>& get_image () const {return pixels;};
  /// Return camera array.
  blitz::Array<T_pixel, 2>& get_image () {return pixels;};

  /// Forwards to camera_base::project (see its pixel convention).
  blitz::TinyVector<int, 2> project (vec3d pos) const {return camera_base::project(pos);};

  // FITS routines
  void write_parameters (CCfits::ExtHDU& hdu,
                         const T_unit_map& units) const {
    camera_base::write_parameters (hdu, units);};
  void write_wcs (CCfits::ExtHDU& hdu, const T_unit_map& units) const {
    camera_base::write_wcs (hdu, units);};
  void load_image (const CCfits::ExtHDU& hdu);

  /** Camera emission function for monochromatic is not implemented. */
  virtual ray_distribution_pair<T_pixel> 
  emit (T_biaser, local_random& p, T_float& prob) { 
    assert(0); 
    
    return ray_distribution_pair<T_pixel> 
      (boost::shared_ptr<T_ray> 
       (new T_ray (vec3d(), vec3d (), T_pixel())), T_pixel(), distr_); };
  /** Camera scattering function for monochromatic is not implemented. */
  virtual void scatter (polychromatic_biaser, T_ray& r, local_random& rng, T_float& prob) {assert(0);};

  /// Returns a zero of the pixel value type, derived from pixel (0,0).
  T_pixel zero_lambda() const {return T_pixel(0.*pixels(0,0));}; 
  // NOTE(review): clone() asserts and has no return statement, which
  // is undefined behavior if ever reached with NDEBUG defined --
  // confirm this path is truly unreachable.
  std::auto_ptr<emission<monochromatic_policy, local_random> > clone() const {
    assert(0);};
  boost::shared_ptr<angular_distribution<T_pixel> > distribution() {
    return distr_; };
};


/** Partial specialization of the camera class for the case when
    T_pixel is a one-dimensional blitz array. Used for polychromatic
    runs. This camera can use optional doppler shift information to
    shift the wavelength scale of the added vectors, but this requires
    that the wavelength scale be logarithmic. */
template <typename T_pixel>
class mcrx::camera<blitz::Array<T_pixel, 1> > : public mcrx::camera_base  {
public:
  friend class emergence<blitz::Array<T_pixel,1> > ;
  typedef blitz::Array<T_pixel, 3> T_image;
  typedef blitz::Array<T_pixel,1> T_content; 
  typedef ray<T_content> T_ray;
  typedef typename emission<polychromatic_policy, local_random>::T_biaser T_biaser;

private:
  /** Performs the actual pixel addition without taking the lock;
      factored out so the locking and non-locking paths of add() can
      share it. */
  template <typename T>
  void actual_add (const blitz::TinyVector<int, 2>& pix, T_float i,
            const blitz::ETBase<T>& content, T_float doppler=1.0);
  
  /** Image array.  Instead of an array of arrays, it is a
      three-dimensional array. It starts out empty and is allocated
      lazily by check_and_resize once the wavelength dimension is
      known. */
  T_image pixels;
  /// Angular distribution wrapping this camera (handed out by distribution()).
  boost::shared_ptr<angular_distribution<T_content> > distr_;

  /** Doppler factor corresponding to one wavelength pixel. For
      kinematic calculations, the incoming SEDs are shifted according
      to the doppler factor. To do this efficiently, the wavelength
      grid must be logarithmic so the SEDs simply can be shifted. This
      quantity corresponds to the ratio between two neighboring
      wavelengths. Zero means no kinematic shifting. */
  T_float dopplerscale_;

  void check_and_resize(int);

public:
  /** Constructor doesn't allocate any space in the pixels array,
      because at this point we don't know how long the value dimension
      is. */
  camera (T_float cd, T_float T, T_float p,
	  int ysize, const vec3d& translate_origin,
	  boost::shared_ptr<projection>& proj) :
    camera_base (cd, T, p, ysize, translate_origin, proj),
    distr_(new camera_angular_distribution<T_content>(*this)),
    dopplerscale_(0.0) {};
  /** As above, for an explicitly positioned camera. The initializer
      list is written in member declaration order (distr_ before
      dopplerscale_) -- the order members are actually initialized in
      -- fixing the previous -Wreorder mismatch and matching the other
      constructors. */
  camera (const vec3d& pos, const vec3d& dir, const vec3d& up,
	  int ysize,
	  boost::shared_ptr<projection>& proj) :
    camera_base (pos, dir, up, ysize, proj),
    distr_(new camera_angular_distribution<T_content>(*this)),
    dopplerscale_(0.0) {};

  /** Copy constructor. Does NOT copy the image array, but sizes it
      and sets it to zero instead. */
  camera (const camera& rhs): camera_base (rhs),
      pixels (rhs.pixels.shape()), 
      distr_(new camera_angular_distribution<T_content>(*this)),
      dopplerscale_(rhs.dopplerscale_) {
    pixels = 0;};

  /** Force the camera to allocate the pixel array. */
  void allocate(const array_1& content) {
    check_and_resize(content.size()); };

  /** Adds content*i to the specified pixel in the image. The doppler
      parameter is the fractional shift of the wavelength scale. */
  template <typename T>
  void add (const blitz::TinyVector<int, 2>& pix, T_float i,
            const blitz::ETBase<T>& content, T_float doppler=1.0);

  /** Adds content*i to the image. The position pos is projected onto
      the image plane to determine which pixel is added to. The
      doppler parameter is the fractional shift of the wavelength
      scale. */
  template <typename T>
  void add (const vec3d& pos, T_float i, const blitz::ETBase<T>& content, 
	    T_float doppler=1.0) {
    add (project (pos), i, content, doppler);};

  /** Adds content to the image. The position pos is projected onto
      the image plane to determine which pixel is added to. The
      doppler parameter is the fractional shift of the wavelength
      scale. */
  template <typename T>
  void add (const vec3d& pos, const blitz::ETBase<T>& content, 
	    T_float doppler=1.0) {
    add (project (pos), 1., content, doppler);};

  /** Camera increment operator is used when coadding multiple cameras
      used in parallel runs. */
  camera& operator+= (const camera& rhs) {
    // if rhs is not initialized, it can't have any info. skip it
    if(rhs.pixels.size()>0) {
      if (use_mutex) {
	boost::mutex::scoped_lock pixel_lock (pixel_mutex);      
	check_and_resize(rhs.pixels.extent(blitz::thirdDim));
	pixels+= rhs.pixels; 
      } else {
	check_and_resize(rhs.pixels.extent(blitz::thirdDim));
	pixels+= rhs.pixels; 
      }
    }
    return *this;
  };
  
  /// Return camera array.
  const blitz::Array<T_pixel, 3>& get_image () const {return pixels;};
  /// Return camera array.
  blitz::Array<T_pixel, 3>& get_image () {return pixels;};
  
  // FITS routines
  void write_parameters (CCfits::ExtHDU& hdu,
                         const T_unit_map& units) const {
    camera_base::write_parameters (hdu, units);};
  void write_wcs (CCfits::ExtHDU& hdu, const T_unit_map& units) const {
    camera_base::write_wcs (hdu, units);};
  void load_image (const CCfits::ExtHDU& hdu);

  /** Sets the doppler scale of the wavelength array. If the doppler
      parameter is used, this must be set before adding any signal to
      the cameras. If the wavelength scale is not strictly
      logarithmic, an exception is thrown. */
  // NOTE(review): assumes l has at least 2 elements -- confirm callers
  // never pass a single-wavelength grid.
  void set_dopplerscale(const T_content& l) {
    const int nl=l.size();
    // ratio of neighboring wavelengths; constant iff the scale is log
    T_content r(l(blitz::Range(1,nl-1))/l(blitz::Range(0,nl-2)));
    if(max(r)/min(r)-1>1e-5) {
      std::cerr << "Wavelength scale is not logarithmic, kinematics require that!" << endl;
      throw 0;
    }
    dopplerscale_ = mean(r);
  };

  // emission routines
  virtual ray_distribution_pair<T_content> 
  emit (T_biaser, local_random& p, T_float& prob);
  virtual void scatter (polychromatic_biaser, T_ray& r, local_random& rng, T_float& prob);
  /// Returns a wavelength slice of pixel (0,0). Requires pixels to be
  /// allocated; the returned slice references the pixel data.
  T_content zero_lambda() const {return pixels(0,0,blitz::Range::all());};
  // NOTE(review): clone() asserts and has no return statement --
  // undefined behavior if reached with NDEBUG; confirm unreachable.
  std::auto_ptr<emission<polychromatic_policy, local_random> > clone() const {
    assert(0);};

  boost::shared_ptr<angular_distribution<T_content> > distribution() {
    return distr_; };
};


/** Check whether the camera array is allocated and if not, allocate. */
/** Check whether the camera array is allocated and if not, allocate. */
template <typename T_pixel>
void
mcrx::camera<blitz::Array<T_pixel, 1> >::
check_and_resize (int n)
{
  // Already allocated? Then there is nothing to do.
  if (pixels.extent (blitz::thirdDim) != 0)
    return;

  try {
    // First use: size the array to (x, y, n) and clear it.
    pixels.resize (xsize (), ysize (), n);
    pixels = 0;
  }
  catch (std::bad_alloc&) {
    // Report the attempted allocation size before rethrowing.
    const double gbytes = xsize()*ysize()*n*8./1024/1024/1024;
    std::cerr << "Fatal: camera::add failed allocating "
	      << gbytes << "GB for "
	      << xsize() << ", " << ysize() << ", " << n << " camera array!"
	      << endl;
    throw;
  }
}

/*** This function "actually" adds to the pixel. It exists to avoid
     duplication of the cases using a mutex and not. */
/** This function "actually" adds to the pixel. It exists to avoid
    duplication of the cases using a mutex and not. */
template <typename T_pixel>
template <typename T>
void
mcrx::camera<blitz::Array<T_pixel, 1> >::
actual_add (const blitz::TinyVector<int, 2>& pix, T_float i,
     const blitz::ETBase<T>& content, T_float doppler)
{
  // Unwrap the expression template and get the SED length.
  const T& cc = content.unwrap();
  const int zsz= cc.ubound(0)-cc.lbound(0)+1;

  // Lazily allocate the pixel array now that we know the wavelength
  // dimension.
  check_and_resize(zsz);
  
  // now this should be cool
  assert (pixels.extent(blitz::thirdDim) == zsz);
  // Guard against adding infinities or NaNs to the image.
  ASSERT_ALL((i*cc<blitz::huge(T_float()))&&(i*cc==i*cc));

  // get and apply doppler offset, if applicable
  if(dopplerscale_>0) {
    // Convert the doppler factor to an integer shift in (logarithmic)
    // wavelength pixels.
    const int offset (int(round(log(doppler)/log(dopplerscale_))));
    DEBUG(2,cout << "Adding to camera, doppler factor " << doppler << ", doppler offset "<< offset << endl;);

    // If the entire wavelength range falls outside of the sed, print
    // warning. The shift can exceed the range in either direction
    // (red- or blueshift), so check the magnitude; previously only
    // offset>=zsz was checked and a large negative offset produced
    // invalid array ranges below.
    if(offset>=zsz || -offset>=zsz) {
      DEBUG(1,std::cerr << "Warning: Doppler offset " << doppler	\
	    << " exceeds wavelength range " <<  std::endl;);
      return;
    };
    
    /// \todo in principle the sed also changes in value, because it's
    /// being stretched or contracted in wavelength space. that effect
    /// is not included.
    if(offset>=0)
      pixels (pix [0],pix [1], blitz::Range(offset, zsz-1)) += 
	i*cc(blitz::Range(0, zsz-1-offset));
    else
      pixels (pix [0],pix [1], blitz::Range(0, zsz-1+offset)) += 
	i*cc(blitz::Range(-offset, zsz-1));
  }
  else
    pixels (pix [0],pix [1], blitz::Range::all ()) += i*cc;

  //intensity += value; what do we do about this?
  DEBUG(3,std::cout << "Image Ratio " << pixels(pix[0],pix[1],535)/pixels(pix[0],pix[1],532) << ' '<< pixels(pix[0],pix[1],615)/pixels(pix[0],pix[1],614) << endl;);
}


/** Adds the value to a pixel. This is more complicated than the
    corresponding routine for the monochromatic camera class, because
    it needs to check if the pixels array has been allocated and if
    not do so. */
/** Adds the value to a pixel. This is more complicated than the
    corresponding routine for the monochromatic camera class, because
    it needs to check if the pixels array has been allocated and if
    not do so. */
template <typename T_pixel>
template <typename T>
void
mcrx::camera<blitz::Array<T_pixel, 1> >::
add (const blitz::TinyVector<int, 2>& pix, T_float i,
     const blitz::ETBase<T>& content, T_float doppler)
{
  DEBUG(3,cout << "Adding camera max " << max(i*content.unwrap()) << endl;)

  // A negative x index (-1) flags a ray that missed the image.
  if (pix [0] < 0)
    return;

  if (!use_mutex) {
    actual_add (pix, i, content, doppler);
    return;
  }

  // Multithreaded case: serialize access to the shared pixel array.
  boost::mutex::scoped_lock pixel_lock (pixel_mutex);
  actual_add (pix, i, content, doppler);
}


/** Constructor that takes arguments that specify the camera positions
    as an approximately isotropic distribution around the origin, with
    nt points over the pi radians in the polar (theta) direction and
    np over the 2pi radians in the azimuthal (phi, xy plane)
    direction. If exclude_south_pole is true, one camera is on the +z
    axis and the -z axis is empty. cd is the distance from the origin
    to the cameras, linear_fov is the linear field of view at the
    origin, ysize is the image resolution (in the y direction) of the
    cameras. The x-resolution is set according to the aspect angle of
    the projection.) If translate_origin is set, the positions of the
    cameras are shifted so they look at translate_origin instead of at
    the origin. type is the projection type to use for the cameras. */
template<typename T_image>
mcrx::emergence<T_image>::
emergence(int nt, int np, T_float cd, T_float linear_fov, 
	  unsigned int ysize, bool exclude_south_pole,
	  const vec3d& translate_origin,
	  const std::string& type)
{
  const int nim_theta = nt;
  const int nim_phi = np;

  // Angular field of view subtending linear_fov at distance cd.
  const T_float fov = 2*atan(linear_fov/(2*cd));

  std::cout << "Creating cameras. Translating origin by " << translate_origin << std::endl;

  // create projection object (shared by all cameras)
  boost::shared_ptr<projection> 
    proj(projection::projection_factory(type, fov, 1));

  // Set up projection planes for all view points

  // North polar point
  cameras.push_back(new T_camera (cd, 0, 0, ysize, 
				  translate_origin, proj));
  
  // We want the cameras spread equally in *cos* theta.  If we
  // exclude the South Pole, we distribute uniformly over the full
  // sphere which means leaving "half a hole" around the South
  // Pole.
  
  // The polar cameras get all of phi, whereas the others get
  // 1/n_phi. This affects how much costheta the polar cameras
  // should have in order to get the same solid angle
  const T_float n_theta_eff = nim_theta + 
    (exclude_south_pole? 1.0: 2.0)/nim_phi - 
    (exclude_south_pole? 1: 2);
  // Width of each ring's cos(theta) band on the [-1,1] interval.
  const T_float delta_theta = 2/n_theta_eff;

  // Rings between the poles: equal solid angle per camera. The
  // 1./nim_phi term accounts for the polar camera's share of costheta.
  for(int t=1;t<nim_theta-(exclude_south_pole? 0: 1);t++)
    for(int p=0;p<nim_phi;p++) {
      const T_float theta= acos(1 - delta_theta*(1./nim_phi + t - 0.5));
      const T_float phi=p*2*constants::pi/nim_phi;
      cameras.push_back(new T_camera (cd, theta, phi,
                                      ysize, translate_origin, proj));
    }

  // South polar point
  if (!exclude_south_pole) {
    cameras.push_back(new T_camera (cd, constants::pi, 0, ysize, 
				    translate_origin, proj));
  }
}


/** Constructor that takes a vector of (theta, phi) angles in
    radians. cd is the distance from the origin to the cameras,
    linear_fov is the linear field of view at the origin, ysize is the
    image resolution (in the y direction) of the cameras. The
    x-resolution is set according to the aspect angle of the
    projection.) If translate_origin is set, the positions of the
    cameras are shifted so they look at translate_origin instead of at
    the origin. type is the projection type to use for the cameras. */
/** Constructor that takes a vector of (theta, phi) angles in
    radians. cd is the distance from the origin to the cameras,
    linear_fov is the linear field of view at the origin, ysize is the
    image resolution (in the y direction) of the cameras. The
    x-resolution is set according to the aspect angle of the
    projection.) If translate_origin is set, the positions of the
    cameras are shifted so they look at translate_origin instead of at
    the origin. type is the projection type to use for the cameras. */
template<typename T_image>
mcrx::emergence<T_image>::
emergence(const std::vector<std::pair<T_float, T_float> >& cam_pos,
	  T_float cd, T_float linear_fov, unsigned int ysize, 
	  const vec3d& translate_origin, const std::string& type)
{
  // Angular field of view subtending linear_fov at distance cd.
  const T_float fov = 2*atan(linear_fov/(2*cd));
  std::cout << "Creating cameras. Translating origin by " << translate_origin << std::endl;

  // A single projection object is shared by all the cameras.
  boost::shared_ptr<projection> 
    proj(projection::projection_factory(type, fov, 1));

  // One camera per (theta, phi) entry in the list.
  for (std::vector<std::pair<T_float, T_float> >::size_type c = 0;
       c < cam_pos.size(); ++c)
    cameras.push_back(new T_camera (cd, cam_pos[c].first,
				    cam_pos[c].second, ysize,
				    translate_origin, proj));
}


/** Constructor that takes fully specified setups for the cameras, as
    vectors of positions, directions, up-vectors (+y direction on
    image plane) and angular fields of view. ysize is the image
    resolution (in the y direction) of the cameras. The x-resolution
    is set according to the aspect angle of the projection.) If
    translate_origin is set, the positions of the cameras are shifted
    so they look at translate_origin instead of at the origin. type is
    the projection type to use for the cameras. */
/** Constructor that takes fully specified setups for the cameras, as
    vectors of positions, directions, up-vectors (+y direction on
    image plane) and angular fields of view. ysize is the image
    resolution (in the y direction) of the cameras. The x-resolution
    is set according to the aspect angle of the projection.) If
    translate_origin is set, the positions of the cameras are shifted
    so they look at translate_origin instead of at the origin. type is
    the projection type to use for the cameras. */
template<typename T_image>
mcrx::emergence<T_image>::
emergence (const std::vector<vec3d>& cam_pos, 
	   const std::vector<vec3d>& cam_dir,
	   const std::vector<vec3d>& cam_up,
	   const std::vector<T_float>& fov, unsigned int ysize,
	   const vec3d& translate_origin,
	   const std::string& type)
{
  std::cout << "Creating cameras. Translating origin by " << translate_origin << std::endl;

  for (std::vector<vec3d>::size_type c = 0; c < cam_pos.size(); ++c) {
    // The direction, up and fov vectors must be at least as long as
    // the position vector.
    assert (c < cam_dir.size());
    assert (c < cam_up.size());
    assert (c < fov.size());

    // Shift the camera so it looks at translate_origin.
    const vec3d pos (cam_pos[c] - translate_origin);

    // Each camera gets its own projection object since the fields of
    // view may differ.
    boost::shared_ptr<projection> 
      proj (projection::projection_factory (type, fov[c], 1));

    cameras.push_back (new T_camera (pos, cam_dir[c], cam_up[c],
				     ysize, proj));
  }
}


/** Constructor that takes a vector of pre-made camera objects, which
    are just copied into the object. */
/** Constructor that takes a vector of pre-made camera objects, which
    are just copied into the object. Uses an iterator loop (matching
    the copy constructor) instead of the previous `int` index, which
    triggered a signed/unsigned comparison against vector::size(). */
template<typename T_image>
mcrx::emergence<T_image>::
emergence (const std::vector<const T_camera*>& cams)
{
  for (typename std::vector<const T_camera*>::const_iterator
	 i = cams.begin(); i != cams.end(); ++i)
    cameras.push_back(new T_camera (**i));
}


/** Copy constructor copies the camera setup (but NOT their contents
    which are initialized to zero). */
/** Copy constructor copies the camera setup (but NOT their contents
    which are initialized to zero, since the camera copy constructor
    does not copy image arrays). */
template<typename T_image>
mcrx::emergence<T_image>::
emergence(const emergence& rhs)
{
  // Clone each of rhs's cameras, matched by number.
  const int n = rhs.n_cameras();
  for (int c = 0; c < n; ++c)
    cameras.push_back(new T_camera (rhs.get_camera(c)));
}

/** Destructor releases the cameras, which are owned by this object. */
template<typename T_image>
mcrx::emergence<T_image>::~emergence ()
{
  for (iterator c = begin(); c != end(); ++c)
    delete *c;
}

/** Adds the values in the cameras in the rhs emergence object to the
    cameras in this. Used to add the image arrays in different
    emergence objects together (when multithreading). */
/** Adds the values in the cameras in the rhs emergence object to the
    cameras in this. Used to add the image arrays in different
    emergence objects together (when multithreading). Cameras are
    matched by NUMBER, regardless of their positions in the vectors. */
template<typename T_image>
mcrx::emergence<T_image>&
mcrx::emergence<T_image>::operator+= (const emergence& rhs)
{
  const int n = n_cameras();
  for (int c = 0; c < n; ++c)
    get_camera(c) += rhs.get_camera(c);
  return *this; 
} 


/** Specifies whether the cameras should use mutex locking when adding
    to the image arrays. If several threads are working on the same
    emergence object this must be set to true to avoid corruption. */
/** Specifies whether the cameras should use mutex locking when adding
    to the image arrays. If several threads are working on the same
    emergence object this must be set to true to avoid corruption. */
template<typename T_image>
void mcrx::emergence<T_image>::use_lock (bool b)
{
  // Propagate the locking policy to every camera.
  for (iterator c = begin(); c != end(); ++c)
    (*c)->use_lock(b);
}

/** Camera "scattering": draws a random outgoing direction by sampling
    the image plane uniformly, sets it on the ray, and assigns the
    corresponding probability density (wrt solid angle) to prob. */
template <typename T_pixel>
void
mcrx::camera<blitz::Array<T_pixel, 1> >::
scatter (polychromatic_biaser, T_ray& ray, local_random& rng, T_float& prob)
{
  // we draw a random position on the image plane and deproject it
  blitz::TinyVector<T_float, 2> r;
  rng.rnd(r);

  // XXX deprojection can fail because all image coords are not valid
  // (ie aitoff), so we need to do rejection sampling here
  const vec3d d=deproject(r*axis_);

  // probability is the probability density wrt solid angle. This is
  // dP/dOmega = dP/dA * dA/dOmega, where dP/dA is the density wrt the
  // area on the normalized image plane (=1), and dA/dOmega is
  // 1/area_scale.  We get this directly from the projection object
  // using the normalized image coordinates.
  prob = 1./proj_->area_scale(r);

  ray.set_direction(d);
};

/** Emits a ray from the camera position. The direction and the
    probability density are drawn by scatter(); the SED is set to
    unity over the camera's wavelength dimension. */
template <typename T_pixel>
mcrx::ray_distribution_pair<blitz::Array<T_pixel, 1> > 
mcrx::camera<blitz::Array<T_pixel, 1> >::
emit (polychromatic_biaser, local_random& rng, T_float& prob)
{
  // Unit-intensity SED spanning the wavelength dimension.
  const int nlambda = pixels.extent(blitz::thirdDim);
  T_content unity(nlambda);
  unity = 1;

  // The initial +z direction is a placeholder; scatter() replaces it
  // with a sampled direction and sets prob.
  boost::shared_ptr<T_ray> emitted(new T_ray (pos_, vec3d(0,0,1), unity));
  scatter(polychromatic_biaser(), *emitted, rng, prob);

  return ray_distribution_pair<T_content> (emitted, unity, distr_);
}
 
 
#endif
