/*
    Copyright 2006-2011 Patrik Jonsson, sunrise@familjenjonsson.org

    This file is part of Sunrise.

    Sunrise is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    Sunrise is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Sunrise.  If not, see <http://www.gnu.org/licenses/>.

*/

/// \file
/// Definition of the aux_emergence class.

// $Id$

#include "config.h"
#include "aux_emergence.h"
#include "emergence-fits.h"
#include "CCfits/CCfits"
#include <sstream> 
#include "biniostream.h"
#include "fits-utilities.h"
#include "hpm.h"
#include "constants.h"
#include "boost/lexical_cast.hpp"

using boost::lexical_cast;
using namespace blitz;

/** Writes the keywords describing the auxiliary image slices to the
    specified HDU.  For each quantity, a xxSLICE keyword gives the
    (1-based) FITS slice number and a xx_UNIT keyword gives its unit.
    \param hdu       the HDU to which the keywords are added.
    \param u         unit map used to build the unit strings.
    \param sd_factr  conversion factor from internal units to surface
                     density; must be positive. */
void mcrx::write_aux_keywords(CCfits::HDU* hdu, const T_unit_map& u, 
			      T_float sd_factr)
{
  assert(sd_factr > 0);

  // all extensive quantities share the same per-area suffix, so build
  // the composite unit strings once instead of repeating them below
  const string area_unit = u.get("length")+"^2";
  const string mass_sd_unit = u.get("mass")+"/"+area_unit;

  hdu->addKey("sd_factr", sd_factr,
	      "Conversion factor from internal units to surface density");
  hdu->addKey("imunit", "Q/kpc^2",
	      "All extensive quantities are given as surface densities");

  // this encoding is done by the aux_grid constructor; the +1 maps the
  // 0-based field enums onto 1-based FITS slice numbers
  hdu->addKey("MG_SLICE", aux_pars_fields::mass_gas+1, 
	      "Gas mass is slice "+
	      lexical_cast<string>(aux_pars_fields::mass_gas+1));
  hdu->addKey("MG_UNIT", mass_sd_unit, "Gas mass unit");
  hdu->addKey("MM_SLICE", aux_pars_fields::mass_metals+1, 
	      "Metal mass is slice "+
	      lexical_cast<string>(aux_pars_fields::mass_metals+1));
  hdu->addKey("MM_UNIT", mass_sd_unit, "Metal mass unit");
  hdu->addKey("SFRSLICE", aux_pars_fields::SFR+1, "SFR is slice "+
	      lexical_cast<string>(aux_pars_fields::SFR+1));
  hdu->addKey("SFR_UNIT", u.get("mass")+"/"+u.get("time")+"/"+area_unit,
	      "SFR unit");
  hdu->addKey("GASTSLIC", aux_pars_fields::energy+1,
	      "Gas mass-weighted temperature is slice "+
	      lexical_cast<string>(aux_pars_fields::energy+1));
  hdu->addKey("GASTUNIT", u.get("temperature"), "Temperature unit");
  hdu->addKey("MS_SLICE", aux_pars_fields::mass_stars+1,
	      "Stellar mass is slice "+
	      lexical_cast<string>(aux_pars_fields::mass_stars+1));
  hdu->addKey("MS_UNIT", mass_sd_unit, 
	      "Stellar mass unit");
  hdu->addKey("MMSSLICE", aux_pars_fields::mass_metals_stars+1, 
	      "Metals in stars is slice "+
	      lexical_cast<string>(aux_pars_fields::mass_metals_stars+1));
  hdu->addKey("MMS_UNIT", mass_sd_unit, 
	      "Stellar metal mass unit");
  hdu->addKey("LB_SLICE", aux_pars_fields::L_bol +1, "L_bol is slice "+
	      lexical_cast<string>(aux_pars_fields::L_bol+1));
  hdu->addKey("LB_UNIT", u.get("luminosity")+"/"+area_unit, "L_bol unit");
  hdu->addKey("AGEMSLIC", aux_pars_fields::age_m+1,
	      "Mass-weighted stellar age is slice "+
	      lexical_cast<string>(aux_pars_fields::age_m+1));
  hdu->addKey("AGEMUNIT", u.get("time"), "Age unit");
  hdu->addKey("AGELSLIC", aux_pars_fields::age_l+1,
	      "Luminosity-weighted stellar age is slice "+
	      lexical_cast<string>(aux_pars_fields::age_l+1));
  hdu->addKey("AGELUNIT", u.get("time"), "Age unit");
}


/** Writes the camera images to a FITS file.  The images are written
    as data cubes in HDU's CAMERAi-AUX in the specified file.
    \param file           the FITS file to write to.
    \param normalization  multiplicative normalization applied to the
                          extensive slices (must be positive); also
                          recorded in the "normalization" keyword so
                          load_images can undo it.
    \param units          unit map used for the WCS and unit keywords. */
void mcrx::aux_grid_emergence::write_images (CCfits::FITS& file,
					T_float normalization,
					const T_unit_map& units) const 
{
  using namespace CCfits;
  using namespace blitz;
  assert(normalization > 0);

  // length() is a nonstatic member of the TinyVector pixel type, so
  // we need a dummy object to query the number of components
  const T_image temporary_tinyvector;
  const int ncomp=temporary_tinyvector.length();

  assert (n_cameras_loaded());

  for (T_camera_map::const_iterator i = camera_numbers.begin();
       i != camera_numbers.end(); ++i) {
    const int camera_number = i->first;
    const T_camera& c = get_camera (camera_number);

    const T_camera::T_image& image = get_image (camera_number);
    assert  (image.size() > 0);

    std::ostringstream ost;
    ost << "CAMERA" << camera_number << "-AUX"; 
    cout << ost.str()<< endl;
    
    // check if HDU already exists, otherwise create it
    ExtHDU* hdu;    
    try {
      hdu = &open_HDU (file, ost.str ());
      // it would probably be good to check that image dimensions are
      // consistent
      std::cout << "updating existing HDU " << ost.str()<< endl;
    }
    catch (CCfits::FITS::NoSuchHDU&) {
      std::vector<long> naxes;
      naxes.push_back(image.extent(firstDim ) );
      naxes.push_back(image.extent(secondDim ) );
      naxes.push_back(ncomp); // Number of components in tinyvector
      hdu = file.addImage (ost.str(), DOUBLE_IMG, naxes);

      hdu->writeComment("This HDU contains the images of the auxiliary quantities.  The third dimension runs over the different quantities, see the xxxSLICE keywords for which is which.");
    }
    
    // write wcs keywords
    c.write_wcs(*hdu, units);

    // convert image values to "surface density" by multiplying them
    // by 4pi/omega_pix. (this is indep of our actual units)
    const double sd_factr = 4*constants::pi/c.pixel_solid_angle ();
    assert(sd_factr > 0);

    hdu->addKey("normalization", normalization,
		"Image normalization (internal usage)");

    // write_aux_keywords takes the unit map by const reference, so we
    // can pass units directly (no const_cast needed)
    write_aux_keywords(hdu, units, sd_factr);

    // the "multicomponent" array always has the "component" as the
    // smallest stride.  This is the opposite of the FITS data cubes.
    // This means we need to write component by component.
    for (int j = 0; j < aux_pars_fields::FIR; ++j) {
      // the const cast is necessary because the multicomponent
      // operator [] does not have a const version...
      assert (all (const_cast<T_camera::T_image&> (image)[j] <
		   blitz::huge (T_float ())));
      assert (all (const_cast<T_camera::T_image&> (image)[j] >= 0 ));

      // We have storage order issues here, FITS images are column major      
      array_2 temp_image (image.shape(), ColumnMajorArray<2> ());
      assert(temp_image.isStorageContiguous());

      // unweight necessary quantities.  In the ratios below both the
      // normalization and surface-density factors cancel, so they are
      // not applied.  NOTE(review): pixels where the denominator slice
      // is zero produce non-finite values here -- confirm downstream
      // handling.
      switch (j) {
      case aux_pars_fields::age_m:
	// mass-weighted age, divide by stellar mass
	temp_image= 
	  const_cast<T_camera::T_image&> (image)[int(aux_pars_fields::age_m)]/
	  const_cast<T_camera::T_image&> (image)[int(aux_pars_fields::mass_stars)];
	break;
      case aux_pars_fields::age_l:
	// luminosity-weighted age, divide by bolometric luminosity
	temp_image=
	  const_cast<T_camera::T_image&> (image)[int (aux_pars_fields::age_l)]/
	  const_cast<T_camera::T_image&> (image)[int (aux_pars_fields::L_bol)];
	break;
      case aux_pars_fields::energy :
	// gas temp is mass-weighted, so divide by gas mass
	temp_image= 
	  const_cast<T_camera::T_image&> (image)[int (aux_pars_fields::energy)]/
	  const_cast<T_camera::T_image&> (image)[int (aux_pars_fields::mass_gas)];
	break;
      default:
	// for all other quantities, just normalize and convert to
	// surface density
	temp_image = (normalization*sd_factr)*
	  const_cast<T_camera::T_image&> (image)[j];
      };

      // copy into a valarray for the CCfits write interface
      std::valarray<T_float> temp (temp_image.dataFirst(), temp_image.size());
      
      // write the proper slice of the data cube (FITS indices are 1-based)
      std::vector<long> begin, end;
      begin.push_back(1 );
      begin.push_back(1 );
      begin.push_back(j+1);
      end.push_back(temp_image.extent(firstDim ) );
      end.push_back(temp_image.extent(secondDim) );
      end.push_back(j + 1  );

      hdu->write(begin, end, temp );
      }

  }
}


/** Loads images from a FITS file.  The camera image arrays are loaded
    from the CAMERAi-AUX HDU's, and the normalization and
    surface-density conversion applied by write_images are undone so
    the arrays are restored to internal units. */
void mcrx::aux_grid_emergence::load_images (const CCfits::FITS& file)
{
  using namespace blitz;

  for (T_camera_map::iterator i = camera_numbers.begin();
       i != camera_numbers.end(); ++i) {

    const int camera_number = i->first;
    T_camera& c = get_camera (camera_number);

    std::ostringstream ost;
    ost << "CAMERA" << camera_number << "-AUX";
    cout << ost.str()<< endl;
    const CCfits::ExtHDU& hdu = open_HDU (file, ost.str() );
    assert (hdu.axis(0) == c.xsize ());
    assert (hdu.axis(1 ) == c.ysize ());

    // read the image hdu directly into the camera array
    T_camera::T_image& image = get_image (camera_number);
    read(hdu, image);

    // check for the normalization keywords written by write_images.
    // Missing keywords leave the factors at 1 so old files still load.
    float normalization = 1 ;
    float sd_factr = 1;
    try {
      const_cast<CCfits::ExtHDU&> (hdu).readKey (string ("normalization"),
						 normalization);
    }
    catch (...) {}// CCfits::HDU::NoSuchKeyword&) {}
    try {
      // write_aux_keywords stores this factor under "sd_factr"; the
      // previously-read key "sb_factr" is never written by the aux
      // writers in this file, so it is only tried as a legacy fallback
      const_cast<CCfits::ExtHDU&> (hdu).readKey (string ("sd_factr"),
						 sd_factr);
    }
    catch (...) {
      try {
	const_cast<CCfits::ExtHDU&> (hdu).readKey (string ("sb_factr"),
						   sd_factr);
      }
      catch (...) {}// CCfits::HDU::NoSuchKeyword&) {}
    }

    // restore normalization of image to internal units
    image *= 1/(normalization*sd_factr) ;

  }
}


/** Dump the image arrays to a temporary dump file.  The main purpose
    here is to be fast, since we have gotten a termination signal.
    For each camera, the image extents and component count are written
    followed by the raw pixel data. */
void mcrx::aux_grid_emergence::write_dump (binofstream& file) const
{
  // length() is a nonstatic member of the TinyVector pixel type, so
  // we need a dummy object to query the number of components
  const T_image temporary_tinyvector;
  const int ncomp=temporary_tinyvector.length();

  for (T_camera_map::const_iterator i = camera_numbers.begin();
       i != camera_numbers.end(); ++i) {
    const int camera_number = i->first;

    const T_camera::T_image& image = get_image (camera_number);
    // We need to save the image size as well because it's not
    // allocated until the first ray is shot
    TinyVector<int, 2> extent = image.shape();
    file << extent [0]
	 << extent [1]
	 << ncomp;

    // the raw memory write requires contiguous storage; the particle
    // version already asserts this, so keep the two consistent
    assert(image.isStorageContiguous());
    file.write(reinterpret_cast<const char*> (image.dataFirst()),
	       image.storageSize()*sizeof (T_camera::T_image::T_numtype)*
	       ncomp );
  } 
}

/** Load the image arrays from a temporary dump file.
    \return true if the stream was read successfully and the dump was
    consumed exactly to end-of-file. */
bool mcrx::aux_grid_emergence::load_dump (binifstream& file) 
{
  // length() is a nonstatic member of the TinyVector pixel type, so
  // we need a dummy object to query the number of components
  const T_image temporary_tinyvector;
  const int ncomp=temporary_tinyvector.length();

  for (T_camera_map::iterator i = camera_numbers.begin();
       i != camera_numbers.end(); ++i) {
    const int camera_number = i->first;
    T_camera& c = get_camera (camera_number);

    T_camera::T_image& image = get_image (camera_number);
    TinyVector< int, 2> size;
    int zsize;
    file >> size [0]
	 >>  size [1]
	 >>  zsize;
    assert (size [0] == c.xsize ());
    assert (size [1] == c.ysize ());
    // bug fix: this previously used assignment (zsize = ncomp), which
    // made the check a no-op; we want to verify the dumped component
    // count matches what we expect
    assert (zsize == ncomp);
    image.resize(size );
    // the raw memory read requires contiguous storage
    assert(image.isStorageContiguous());
    file.read (reinterpret_cast<char*> (image.dataFirst()),
	       image.storageSize()*sizeof (T_camera::T_image::T_numtype)*
	       ncomp);
  }

  // success requires a good stream positioned exactly at the end of
  // the dump (the extra get() must hit EOF)
  char c;
  return (file.good() && (file.get(c ).eof()));
}




/** Writes the camera images to a FITS file.  The images are written
    as data cubes in HDU's CAMERAi-AUX in the specified file. This
    only writes the particle data.
    \param file           the FITS file to write to.
    \param normalization  multiplicative normalization applied to the
                          extensive slices (must be positive); recorded
                          in the "particle_normalization" keyword.
    \param units          unit map used for the WCS and unit keywords. */
void mcrx::aux_particle_emergence::write_images (CCfits::FITS& file,
					T_float normalization,
					const T_unit_map& units) const 
{
  using namespace CCfits;
  using namespace blitz;
  assert(normalization > 0);

  // NOTE(review): u is only passed to write_aux_keywords, which takes
  // a const reference, so this const_cast appears unnecessary
  T_unit_map& u = const_cast<T_unit_map&> (units);

  const T_image temporary_tinyvector; // because length() is nonstatic (?)
  const int ncomp=temporary_tinyvector.length();

  assert (n_cameras_loaded());

  for (T_camera_map::const_iterator i = camera_numbers.begin();
       i != camera_numbers.end(); ++i) {

    const int camera_number = i->first;
    const T_camera& c = get_camera (camera_number);

    const T_camera::T_image& image = get_image (camera_number);
    assert  (image.size() > 0);

    std::ostringstream ost;
    ost << "CAMERA" << camera_number << "-AUX"; 
    cout << ost.str()<< endl;
    
    // check if HDU already exists, otherwise create it
    ExtHDU* hdu;    
    try {
      hdu = &open_HDU (file, ost.str ());
      // it would probably be good to check that image dimensions are
      // consistent
      std::cout << "updating existing HDU " << ost.str()<< endl;
    }
    catch (CCfits::FITS::NoSuchHDU&) {
      std::vector<long> naxes;
      naxes.push_back(image.extent(firstDim ) );
      naxes.push_back(image.extent(secondDim ) );
      naxes.push_back(ncomp); // Number of components in tinyvector
      hdu = file.addImage (ost.str(), DOUBLE_IMG, naxes);

      hdu->writeComment("This HDU contains the images of the auxiliary quantities.  The third dimension runs over the different quantities, see the xxxSLICE keywords for which is which.");
    }
    
    // write wcs keywords
    c.write_wcs(*hdu, units);

    // write units
    // convert image values to "surface density"
    double sd_factr;
 
    // convert to surface density by multiplying the image values by
    // 4pi/omega_pix. (this is indep of our actual units)
    sd_factr = 4*constants::pi/c.pixel_solid_angle ();
    assert(sd_factr > 0);

    hdu->addKey("particle_normalization", normalization,
		"Normalization for particle images (internal usage)");

    write_aux_keywords(hdu, u, sd_factr);

    // the "multicomponent" array always has the "component" as the
    // smallest stride.  This is the opposite of the FITS data cubes.
    // This means we need to write component by component.
     
    // Write image array component by component.  Note that we only
    // write the components we get from the stellar particles.  The
    // gas components are left alone.
    for (int j = aux_pars_fields::mass_stars; j < aux_pars_fields::FIR; ++j) {
      // the const cast is necessary because the multicomponent
      // operator [] does not have a const version...
      assert (all (const_cast<T_camera::T_image&> (image)[j] <
		   blitz::huge (T_float ())));
      assert (all (const_cast<T_camera::T_image&> (image)[j] >= 0 ));

      // We have storage order issues here, FITS images are column major      
      array_2 temp_image (image.shape(), ColumnMajorArray<2> ());
      assert(temp_image.isStorageContiguous());

      // unweight necessary quantities (the weighting factors cancel in
      // the ratios, so no normalization is applied to them)
      switch (j) {
      case aux_pars_fields::age_m:
	// mass-weighted age, divide by stellar mass
	temp_image= 
	  const_cast<T_camera::T_image&> (image)[int(aux_pars_fields::age_m)]/
	  const_cast<T_camera::T_image&> (image)[int(aux_pars_fields::mass_stars)];
	break;
      case aux_pars_fields::age_l:
	// luminosity-weighted age, divide by bolometric luminosity
	temp_image= 
	  const_cast<T_camera::T_image&> (image)[int(aux_pars_fields::age_l)]/
	  const_cast<T_camera::T_image&> (image)[int(aux_pars_fields::L_bol)];
	break;
      default:
	// for all other quantities, just normalize and convert to
	// surface density
	temp_image = (normalization*sd_factr)*
	  const_cast<T_camera::T_image&> (image)[j];
      };

      // copy into a valarray for the CCfits write interface
      std::valarray<T_float> temp (temp_image.dataFirst(), temp_image.size());
      
      // write the proper slice of the data cube (FITS indices are 1-based)
      std::vector<long> begin, end;
      begin.push_back(1 );
      begin.push_back(1 );
      begin.push_back(j+1);
      end.push_back(temp_image.extent(firstDim ) );
      end.push_back(temp_image.extent(secondDim) );
      end.push_back(j + 1  );

      hdu->write(begin, end, temp );
      }

  }
}


/** Loads images from a FITS file.  The camera image arrays are loaded
    from the CAMERAi-AUX HDU's, and the normalization and
    surface-density conversion applied by write_images are undone so
    the arrays are restored to internal units. */
void mcrx::aux_particle_emergence::load_images (const CCfits::FITS& file)
{
  using namespace blitz;

  for (T_camera_map::iterator i = camera_numbers.begin();
       i != camera_numbers.end(); ++i) {

    const int camera_number = i->first;
    T_camera& c = get_camera (camera_number);

    std::ostringstream ost;
    ost << "CAMERA" << camera_number << "-AUX";
    cout << ost.str()<< endl;
    const CCfits::ExtHDU& hdu = open_HDU (file, ost.str() );
    assert (hdu.axis(0) == c.xsize ());
    assert (hdu.axis(1 ) == c.ysize ());

    // read the image hdu directly into the camera array
    T_camera::T_image& image = get_image (camera_number);
    read(hdu, image);

    // check for the normalization keywords written by write_images.
    // Missing keywords leave the factors at 1 so old files still load.
    float normalization = 1 ;
    float sd_factr = 1;
    try {
      const_cast<CCfits::ExtHDU&> (hdu).readKey (string ("particle_normalization"),
						 normalization);
    }
    catch (...) {}// CCfits::HDU::NoSuchKeyword&) {}
    try {
      // write_aux_keywords stores this factor under "sd_factr"; the
      // previously-read key "sb_factr" is never written by the aux
      // writers in this file, so it is only tried as a legacy fallback
      const_cast<CCfits::ExtHDU&> (hdu).readKey (string ("sd_factr"),
						 sd_factr);
    }
    catch (...) {
      try {
	const_cast<CCfits::ExtHDU&> (hdu).readKey (string ("sb_factr"),
						   sd_factr);
      }
      catch (...) {}// CCfits::HDU::NoSuchKeyword&) {}
    }

    // restore normalization of image to internal units
    image*= 1/(normalization*sd_factr) ;
  }
}

/** Dump the image arrays to a temporary dump file.  The main purpose
    here is to be fast, since we have gotten a termination signal.
    For each camera, the image extents and component count are written
    followed by the raw pixel data. */
void mcrx::aux_particle_emergence::write_dump (binofstream& file) const
{
  // length() is a nonstatic member of the TinyVector pixel type, so a
  // dummy object is needed to query the number of components
  const T_image dummy_pixel;
  const int n_components = dummy_pixel.length();

  for (T_camera_map::const_iterator cam = camera_numbers.begin();
       cam != camera_numbers.end(); ++cam) {
    const T_camera::T_image& image = get_image (cam->first);

    // The image size must be saved as well, since the array is not
    // allocated until the first ray is shot.
    const TinyVector<int, 2> shape = image.shape();
    file << shape[0] << shape[1] << n_components;

    // raw dump of the pixel data; requires contiguous storage
    assert(image.isStorageContiguous());
    const size_t n_bytes = image.storageSize()
      * sizeof (T_camera::T_image::T_numtype) * n_components;
    file.write(reinterpret_cast<const char*> (image.dataFirst()), n_bytes);
  }
}

/** Load the image arrays from a temporary dump file.
    \return true if the stream was read successfully and the dump was
    consumed exactly to end-of-file. */
bool mcrx::aux_particle_emergence::load_dump (binifstream& file) 
{
  // length() is a nonstatic member of the TinyVector pixel type, so
  // we need a dummy object to query the number of components
  const T_image temporary_tinyvector;
  const int ncomp=temporary_tinyvector.length();

  for (T_camera_map::iterator i = camera_numbers.begin();
       i != camera_numbers.end(); ++i) {

    const int camera_number = i->first;
    T_camera& c = get_camera (camera_number);

    T_camera::T_image& image = get_image (camera_number);
    TinyVector< int, 2> size;
    int zsize;
    file >> size [0]
	 >>  size [1]
	 >>  zsize;

    assert (size [0] == c.xsize ());
    assert (size [1] == c.ysize ());
    // bug fix: this previously used assignment (zsize = ncomp), which
    // made the check a no-op; we want to verify the dumped component
    // count matches what we expect
    assert (zsize == ncomp);
    image.resize(size );
    // the raw memory read requires contiguous storage
    assert(image.isStorageContiguous());
    file.read (reinterpret_cast<char*> (image.dataFirst()),
	       image.storageSize()*sizeof (T_camera::T_image::T_numtype)*
	       ncomp);
  }

  // success requires a good stream positioned exactly at the end of
  // the dump (the extra get() must hit EOF)
  char c;
  return (file.good() && (file.get(c ).eof()));
}




