/*
  Copyright 2006-2011 Patrik Jonsson, sunrise@familjenjonsson.org

  This file is part of Sunrise.

  Sunrise is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  Sunrise is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with Sunrise.  If not, see <http://www.gnu.org/licenses/>.
  
*/

// $Id$

#include<iostream>
#include "CCfits/CCfits"
#include "counter.h"
#include "polychromatic_grid.h"
#include "optical.h"
#include "scatterer.h"
#include "constants.h"
#include "dust_model.h"
#include "emission.h"
#include "emission-fits.h"
#include "full_sed_emergence.h"
#include "full_sed_grid.h"
#include "emergence-fits.h"
#include "mcrx-units.h"
#include "grid_factory.h"
#include "grid-fits.h"
#include "shoot.h"
#include "fits-utilities.h"
#include "boost/shared_ptr.hpp"
#include "density_generator.h"
#include "preferences.h"
#include "model.h"
#include "wd01_Brent_PAH_grain_model.h"
#include "wd01_grain_model.h"
#include "xfer.h"
#include "ir_grid.h"
#include "equilibrium.h"

using namespace mcrx;
using namespace CCfits;
using namespace std; 

// Random-number-generator policy used throughout this test.
typedef local_random T_rng_policy;
// Scatterer specialized for polychromatic (all-wavelengths-at-once) transfer.
typedef scatterer<polychromatic_scatterer_policy, T_rng_policy> T_scatterer;
// Wavelength-indexed array type and biaser type exposed by the scatterer.
typedef T_scatterer::T_lambda T_lambda;
typedef T_scatterer::T_biaser T_biaser;
// Dust model combining the scatterers with cumulative sampling.
typedef dust_model<T_scatterer, cumulative_sampling, T_rng_policy> T_dust_model;
// Emergence (camera) type that accumulates full SEDs per pixel.
typedef full_sed_emergence T_emergence;

// Adaptive spatial grid carrying full per-cell SED data.
typedef full_sed_grid<adaptive_grid> T_grid;
// IR re-emission grid (used only by the commented-out IR section below).
typedef ir_grid<adaptive_grid, cumulative_sampling, local_random> T_ir_grid;
// Photon-emission policy matching the polychromatic transfer.
typedef emission<polychromatic_policy, T_rng_policy> T_emission;

// TODO: modify this factory to resolve the surface of the sphere better;
// see the WG test for an example of how to do this.
// Grid factory producing a uniform-density sphere: cells whose centers
// lie within radius rmax_ get density rho_, all others get zero.  Cells
// are refined until their column density falls below n_tol_.
class uniform_factory: public grid_factory<T_grid::T_data> {
public:
  const T_densities rho_, n_tol_;  // interior density / column-density refinement tolerance
  const T_float rmax_;             // radius of the uniform sphere
  const int n_lambda;              // number of wavelength bins in each cell's intensity array

  // Density at the cell center: rho_ inside the sphere, 0 outside.
  T_densities get_rho(const T_cell& c) const {
    return (dot(c.getcenter(), c.getcenter()) < rmax_*rmax_) ?
      rho_ : T_densities(rho_*0.);
  };

  // NOTE: the initializer list is written in member declaration order
  // (rho_, n_tol_, rmax_, n_lambda).  The original listed n_lambda
  // before n_tol_, which does not match the declaration order and
  // triggers -Wreorder warnings (members are always initialized in
  // declaration order regardless of the list order).
  uniform_factory (const T_densities& rho, T_densities n_tol, 
		   int nl, T_float rmax):
    rho_ (rho), n_tol_(n_tol), rmax_(rmax), n_lambda (nl) {};
  
  // Refine the cell while the column density through it exceeds n_tol_
  // (compared in squared form to avoid a square root) for all components.
  virtual bool refine_cell_p (const T_cell& c, int level) {
    T_densities rho = get_rho(c);
    T_densities n2 (dot(c.getsize(), c.getsize())*rho*rho);
    return all( n2 > (n_tol_*n_tol_));
  };

  // Never merge cells back together.
  virtual bool unify_cell_p (T_grid::const_local_iterator begin,
			     T_grid::const_local_iterator end) {
    return false;
  };

  // Cell payload: density, zero bulk velocity, and an n_lambda-sized
  // (zero-initialized) intensity array.
  virtual T_data get_data (const T_cell& c) {
    return T_data(get_rho(c), vec3d(0,0,0), T_lambda(n_lambda));
  }
    
  virtual int n_threads () {return 1;};
  virtual int work_chunk_levels () {return 5;};
  virtual int estimated_levels_needed () {return 0;};
};

/*
class const_sed {

private:
  T_float L_lambda_;

public:
  const_sed(T_float L_lam): L_lambda_(L_lam) {};

  T_float emission(T_float lambda) const {
    return L_lambda();
  };

  array_1 emission(const array_1& lambda) const {
    array_1 e(lambda.size());
    for(int i=0; i<lambda.size(); ++i)
      e(i) = L_lambda();
    return e;
  };

  T_float L_lambda () const {return L_lambda_;};
};
*/


/// Energy-conservation test for an optically thick, purely absorbing
/// dust cube.  A floodlight source at +z shines rays in the -z
/// direction through a uniform grid filled with a manually specified
/// grain model (opacity 100, albedo 0, g = 0.5).  The luminosity
/// received by cameras distributed over the sphere and the luminosity
/// absorbed in the grid are computed and written out: per-camera SEDs
/// to "cam_sed.txt", the absorbed SED to "absorbed_sed.txt", and the
/// camera images to "mega.fits".
void ir_thick_test ()
{
  // Monte Carlo and geometry parameters.
  const int n_threads = 8;
  const long n_rays =1000;
  const int n_rays_temp = 100000;
  const int grid_n = 10;
  const T_float grid_extent=5;
  const int n_forced=1;
  //we set this below since we are manually specifying a dust model
  //const int vis_ref_lambda = 17;
  //const int vis_ref_lambda = 1;
  const int ir_ref_lambda = 33;
  const T_float rel_tol =1e-2;
  const T_float abs_tol =1e-6;
  const int n_theta = 3;
  //  const int n_theta = 8;
  const int n_phi = 3;
//  const int n_phi = 14;
  const T_float cam_dist = 100;

  // number of values for opacity, albedo, and asymmetry to be used
  // (only referenced by the commented-out parameter-grid code below)
  const int n_kk = 3;
  const int n_aa = 3;
  const int n_gg = 1;

  // maximum and minimum values for the opacity and albedo (asymmetry constant
  // for now)
  const float kk_min = 1.;
  const float kk_max = 100.;
  const float aa_min = 0.;
  const float aa_max = 1.;

  // luminosity of constant SED
  //const T_float L_lambda = 4.e26*1.e7;
  const T_float L_lambda = 1.e33;
  
  // 4.15e5 Msun/kpc^3 means unit opacity with the WD01 dust
  T_densities rho(1);
  //rho=4.15e5/grid_extent*100; // total optical depth 100
  // since we manually specify grain model and we want kappa = tau, we
  // specify rho = 1/d because tau = kappa*rho*d
  rho = 1./grid_extent;

  T_densities n_tol(1);
  //n_tol = 4.15e5*8; // this is ~optical depth 1 in vis for WD01 dust model
  n_tol = 1.; // this is optical depth ~1 when kappa = 1

  mcrx::seed(42);
  T_unit_map units;
  units ["length"] = "m";
  units ["mass"] = "Msun";
  units ["luminosity"] = "W";
  units ["wavelength"] = "m";
  units ["L_lambda"] = "W/m";


  /* when we manually specify the dust model the wavelength values are dummies
  
  // read wavelengths
  ifstream lfile("/n/home/chayward/sunrise/test/pascucci.wavelengths");
  //ifstream lfile("/n/home/chayward/sunrise/test/two.wavelengths");
  vector<T_float> lambdas((istream_iterator<float>(lfile)),
			istream_iterator<float>());
  array_1 lambda(&lambdas[0], TinyVector<int,1>(lambdas.size()), neverDeleteData);
  */
  

  Preferences p;

  /*
  // load stellarmodel
  p.setValue("stellarmodelfile", string("~patrik/dust_data/stellarmodel/Patrik-imfKroupa-Zmulti-ml.fits"));
  p.setValue("min_wavelength", 91.0e-9);
  p.setValue("max_wavelength", 1e-3);
  p.setValue("mappings_sed_file", string("~patrik/mappings/Smodel.fits"));
  p.setValue("mappings_pdr_fraction", 0.2);
  p.setValue("mappings_mcl", 1e7);
  p.setValue("use_mappings", false);
  mappings_sb99_model sb99(p);
  */

  p.setValue("grain_data_directory", string("/n/data/hernquist_lab/chayward/sundata/dust_data/crosssections"));
  p.setValue("wd01_parameter_set", string("DL07_MW3.1_60"));
  p.setValue("use_dl07_opacities", true);

  // load grain model
  T_dust_model::T_scatterer_vector sv;
//  sv.push_back(boost::shared_ptr<T_scatterer> 
//	       (new wd01_grain_model<polychromatic_scatterer_policy, mcrx_rng_policy>(p,units)));


  // **** begin manual specification of dust model ****
/*
  // manually specify opacity, albedo, and asymetry parameter
  const int n_lambda = n_kk*n_aa*n_gg;
  T_lambda kk(n_lambda);
  T_lambda aa(n_lambda);
  T_lambda gg(n_lambda);
  T_lambda lambda(n_lambda);
  // pick a reference wavelength in the middle of the grid
  const int vis_ref_lambda = int(n_kk/2.+0.5)*int(n_aa/2.+0.5)+int(n_gg/2.+0.5);
  cout << "Position of visible reference wavelength in array: "
       << vis_ref_lambda << endl;

  // wavelength values don't matter if we manually specify dust model
  lambda = 1.;

  // keep g constant
  gg = 0.5;

  // do grid of opacities and albedos
  const float kk_step = (kk_max-kk_min)/(n_kk-1);
  const float aa_step = (aa_max-aa_min)/(n_aa-1);

  for (int k=0;k<n_kk;++k)
    for (int a=0;a<n_aa;++a) {
      kk(k*n_aa + a) = kk_min + kk_step*k;
      aa(k*n_aa + a) = aa_min + aa_step*a;
  }

  // output what opacities, albedos, and asymmetry parameters will be
  // used
  cout << "Using " << n_kk << " values for opacity ranging from " << kk_min
       << " to " << kk_max << " in steps of " << kk_step << endl;
  cout << "Using " << n_aa << " values for albedo ranging from " << aa_min
       << " to " << aa_max << " in steps of " << aa_step << endl;
  cout << "Using constant asymmetry parameter " << gg(0) << endl;

*/

  // this bit is used to set albedo to 0 to check energy conservation for
  // absorption only
  T_lambda lambda(3);
  lambda = 1.;
  const int vis_ref_lambda = 1;
  // it doesn't seem to like this, so we'll try this
  //for (int l=0;l<lambda.size();++l)
  //  lambda(l) = l*1.e-7;
  T_lambda kk(lambda.size()); kk = 100.;
  // set albedo to 0 for no scattering
  T_lambda aa(lambda.size()); aa = 0.;
  T_lambda gg(lambda.size()); gg = 0.5;
  

  sv.push_back(boost::shared_ptr<T_scatterer>
               (new simple_HG_dust_grain<polychromatic_scatterer_policy,
                mcrx_rng_policy> (kk,gg,aa)));
  // **** end manual specification of dust model ****


  T_dust_model model (sv);
  model.set_wavelength(lambda);

  // build grid
  cout << "building grid" << endl;
  uniform_factory factory (rho,n_tol, lambda.size(), grid_extent);
  // first create the adaptive grid
  boost::shared_ptr<adaptive_grid<T_grid::T_data> > basegrid
    (new adaptive_grid<T_grid::T_data>  
     (vec3d (-grid_extent,-grid_extent,-grid_extent),
      vec3d (grid_extent,grid_extent,grid_extent),
      vec3i (grid_n, grid_n,grid_n), factory));

  // and then the full grid
  T_grid gr(basegrid, units);

  cout << "setting up emission" << endl;
  //blackbody bb(5800,3.91e26*1e7);

  //define source with constant SED
  array_1 cs(lambda.size());
  cs = 1.;
  /* Since we manually specify dust model we don't care about units;
  this may cause some underflow, though, so we should check this
  cs(0) = L_lambda;
  cs(1) = L_lambda;
  T_float L_bol_src = integrate_quantity(cs, lambda);


  cout << "Source bolometric luminosity: " << L_bol_src << " W \n";
  */


  auto_ptr<T_emission> em;

// create floodlight on x-axis at edge of grid; make it point in -z direction 
// and have radius 2 times size of grid
  em.reset(new floodlight_emission<polychromatic_policy, T_rng_policy> (vec3d
  	   (0., 0., grid_extent), vec3d (0.0, 0.0, -1.0), 2.0*grid_extent,
           cs));
  //em.reset(new floodlight_emission<polychromatic_policy, T_rng_policy> (vec3d
  //         (5.0,0.1,0.1), vec3d (-1.0, 0.0, 0.0), 10.,cs.emission(lambda)));
  //cout << bb.emission(lambda); 
  cout << cs;
  cout << "setting up emergence" << endl;
  std::vector<std::pair<T_float, T_float> > cam_pos;


  // distribute cameras uniformly in solid angle

  // Effective number of theta rings: the two polar cameras together
  // count as the fraction 2/n_phi of a full ring of n_phi cameras.
  // NB: the 2./n_phi here must be floating-point division; the
  // original code used 2/n_phi, which is integer division (zero for
  // n_phi > 2) and skewed the theta spacing of the ring cameras.
  const T_float n_theta_eff = n_theta + 2./n_phi - 2;
  const T_float delta_theta = 2/n_theta_eff;

  for (int t=1; t < n_theta - 1; t++)
    for (int p=0; p < n_phi; p++) {
      // theta chosen so each camera subtends equal solid angle
      const T_float theta= acos(1 - delta_theta*(1./n_phi + t - 0.5));
      const T_float phi=p*2*constants::pi/n_phi;
      cam_pos.push_back(std:: make_pair (theta, phi));
  }

  // add in south pole (the north pole camera would look straight into
  // the back of the floodlight source, so it is deliberately omitted)
  cam_pos.push_back(std::make_pair(constants::pi,0));


/*
  // for now just put cameras on each axis
  cam_pos.push_back(std::make_pair (constants::pi/2, 0.));
  cam_pos.push_back(std::make_pair (constants::pi/2, constants::pi/2));
  cam_pos.push_back(std::make_pair (constants::pi/2, constants::pi));
  cam_pos.push_back(std::make_pair (constants::pi/2, 3*constants::pi/2));
  cam_pos.push_back(std::make_pair (0., 0.));
  cam_pos.push_back(std::make_pair (constants::pi, 0.));
*/

//  cam_pos.push_back(std:: make_pair (0.1, 0.01) );
  auto_ptr<T_emergence> e(new T_emergence(cam_pos, cam_dist, 2.*grid_extent, 50));
  
  cout << "shooting" << endl;

  std::vector<T_rng::T_state> states= generate_random_states (n_threads);
  T_rng_policy rng;

  xfer<T_dust_model, T_grid, T_rng_policy> x (gr, *em, *e, model, rng, 1e-2, 1,n_forced,10.);
  long n_rays_actual = 0;
  dummy_terminator t;
  T_biaser b(vis_ref_lambda);
  shoot (x, scatter_shooter<T_biaser> (b), t, states, n_rays,
  	 n_rays_actual, n_threads,0, "");
  /*
  for(;n_rays_actual<n_rays;n_rays_actual++) {
    x.shoot(b);
  }
  */
  T_float normalization = 1./n_rays_actual;

  // for each camera sum luminosity received and store
  array_2 integrated_sed(e->n_cameras_loaded(),lambda.size()); integrated_sed = 0.;
  array_1 mean_sed(lambda.size()); mean_sed=0.;
//  T_image image;
//  array_1 sed;
//  array_2 temp;

  int ii=0;
  // loop over cameras and load images; images are in surface brightness
  // (W/m/kpc^2/sr) integrated over pixel solid angle, so we need
  // to convert units and multiply by 4*pi*d^2 to get L
  for (T_emergence::iterator i = e->begin(); i != e->end(); ++i, ++ii) {
    const T_emergence::T_camera::T_image image = (*i)->get_image ();
    // this sums over i, j indices (have to transpose b/c can only sum over
    // last index) to give sum of pixel values for each lambda
    integrated_sed(ii,Range::all()) = sum(sum(image(tensor::k, tensor::j,
      tensor::i), tensor::k), tensor::j);
    mean_sed(Range::all()) += integrated_sed(ii,Range::all());
  }

  mean_sed /= e->n_cameras_loaded();

  // need to multiply by 4*pi*d^2 to get inferred luminosity; we must also 
  // convert camera distance to meters
  const T_float to_m = units::convert(units.get("length"),"m");
  const T_float distance_factor = 4*constants::pi*cam_dist*cam_dist*to_m*to_m;
  cout << "Using distance factor = " << distance_factor << endl;
  cout << "Using normalization 1/nrays = " << normalization << endl;
  integrated_sed *= normalization*distance_factor;


  // calculate and output bolometric luminosity for each camera
  array_1 L_bol_cam(e->n_cameras_loaded()); L_bol_cam = 0.;
  T_float mean_L_bol_cam; mean_L_bol_cam = 0.;

  for (int i=0;i<L_bol_cam.size();++i) {
    L_bol_cam(i) = integrate_quantity(integrated_sed(i,Range::all()),lambda);
    cout << "Camera " << i << " bolometric luminosity"
      << ": " << L_bol_cam(i) << " W \n";
   mean_L_bol_cam += L_bol_cam(i);
  }

  mean_L_bol_cam /= e->n_cameras_loaded();

  cout << "Mean bolometric luminosity for all cameras: " << mean_L_bol_cam
       << " W \n";

  // write SED for each camera to file
  ofstream out_cam("cam_sed.txt");
  out_cam << "# Columns labeled Cam_<i> contain L_lambda (W/m) for each"
          << "  camera \n";
  out_cam << "# Lambda (m) \t";
  for (int c=0;c<L_bol_cam.size();++c)
    out_cam << "Cam_" << c << "\t";
  out_cam << "Mean \n";
  for (int l=0;l<lambda.size();++l) {
    out_cam << setw(10) << lambda(l) << "\t";
    for (int c=0;c<L_bol_cam.size();++c)
      out_cam << setw(10) << integrated_sed(c,l) << "\t";
    out_cam << setw(10) << mean_sed(l) << "\n";
  }
  out_cam.close();

  // calculate absorbed luminosity at each wavelength; what are units here?
  array_1 abs_sed(lambda.size());
  abs_sed = 0;
  for (T_grid::iterator c=gr.begin(); c!=gr.end(); ++c)
     abs_sed += model.total_abs_length_to_absorption(c->data()->densities())*
       c->data()->intensity();
  abs_sed *= normalization;

  // calculate and output bolometric luminosity absorbed
  T_float L_bol_abs;
  L_bol_abs = integrate_quantity(abs_sed,lambda);
  cout << "Bolometric luminosity absorbed in grid: " << L_bol_abs << " W \n";

  // now write SED
  ofstream out("absorbed_sed.txt");
  out << "# Total SED of energy absorbed in grid \n";
  out << "# Lambda (m) \t L_lambda (W/m) \n";
  for (int l=0; l<lambda.size(); ++l)
    out << setw(10) << lambda(l) << '\t' << abs_sed(l) << '\n';
  out.close();

  // write images; this frees image arrays, so we must do it last
  FITS output("mega.fits", Write);
  e->write_images(output,normalization,units, "STAR", false, false);

}



// don't do IR for now

  // now we start the interesting stuff...
  // make dust mass vector
/*
  array_2 dust_mass(gr.n_cells(), model.n_scatterers());
  array_2 star_intensity(gr.intensities().shape());

  int cc=0;
  const T_float area_factor = gr.area_factor();
  for(T_grid::const_iterator c=gr.begin(); c!=gr.end(); ++c, ++cc) {
    dust_mass(cc, Range::all()) = c->data()->densities()*c->volume();
    star_intensity(cc, Range::all()) =
      (gr.intensities()(cc,Range::all())*normalization/
       (4*constants::pi*c->volume()*area_factor));
  }

  uniform_factory ir_factory (rho,n_tol, lambda.size(), grid_extent);
  T_ir_grid irg(vec3d (-grid_extent,-grid_extent,-grid_extent),
		vec3d (grid_extent,grid_extent,grid_extent),
		vec3i (grid_n, grid_n,grid_n), gr.get_structure(),
		dust_mass, lambda);

  array_2 dust_intensity(gr.n_cells(), lambda.size());
  dust_intensity=0;

  // Determine dust equilibrium
  determine_dust_equilibrium_intensities
    (model,
     gr,
     irg,
     lambda,
     star_intensity,
     dust_intensity,
     states,
     n_rays_temp,
     n_threads,
     rel_tol,
     abs_tol,
     1e-2,
     10,
     t,
     false);

  // now generate output
  std::vector<grain_model<polychromatic_scatterer_policy, 
    mcrx_rng_policy>*> grain_models;
  for(int i=0; i< model.n_scatterers(); ++i) {
    grain_models.push_back
      (dynamic_cast<grain_model<polychromatic_scatterer_policy, 
       mcrx_rng_policy>*>(&model.get_scatterer(i)));
    grain_models.back()->resample(lambda);
  }

  e.reset(new T_emergence(cam_pos, 100, 2.1*grid_extent, 50));
  //e.reset(new T_emergence(3,5, 100, 2.1*grid_extent, 50, true));
  irg.reset_list();
  irg.calculate_SED(array_2(star_intensity + dust_intensity), grain_models,
  		    t);
  gr.reset();
  xfer<T_dust_model, polychromatic_grid, T_rng_policy> x2 (gr, irg, *e, model, rng, 1e-2, 0,0,10.);

  n_rays_actual = 0;
  T_biaser b2(ir_ref_lambda);
  scatter_shooter<T_biaser> s(b2);
  shoot (x2, s, t, states, n_rays,
  	 n_rays_actual, n_threads,0, "");
*/
  /*
  for(;n_rays_actual<n_rays;n_rays_actual++) {
    x2.shoot(b);
  }
  */

/*
  normalization = irg.total_emission_weight()/n_rays_actual;
  e->write_images(output,normalization,units, "IR", false, false);

  ofstream out("cell_seds.txt");
  for(T_ir_grid::iterator c=irg.begin(); c!=irg.end(); ++c)
    out << sqrt(dot(c->getcenter(),c->getcenter())) << '\n';
  out << "\n\n";
  for(T_ir_grid::iterator c=irg.begin(); c!=irg.end(); ++c) {
    for(int l=0; l<lambda.size(); ++l)
      out << lambda(l) << '\t' << c->data()->get_emission()(l) << '\n';
    out << "\n\n";
  }
*/

// Program entry point; command-line arguments are ignored.
int main (int, char**)
{
  ir_thick_test();
  return 0;
}
