#include "covariance.h"
#include "dataset.h"
#include "tensor.h"
#include "time.h"
#include "vector_tools.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstring>
#include <sstream>
#include <stdexcept>
#include <fftw3.h>

//initialize random number generator for DataSet
//(seeded once with the wall-clock time; shared by all instances)
MTRand_int32 DataSet::irand(time(0));
//state for the gaussian random generator — presumably next_gaus caches
//the second deviate of a generated pair and b_random_gaus flags whether
//it is available; confirm against Random_Gaus' implementation
bool DataSet::b_random_gaus=false;
double DataSet::next_gaus=0;
//global multiplier applied to the kernel bandwidth (1.0 = no extra smoothing)
double DataSet::kernel_smoothing_factor=1.0;

//when set, ComputePDF reports only the bias term as the PDF error
//(no variance contribution)
bool DataSet::b_bias_only = false;

//remove power ambiguity: integer power helper that disambiguates the
//overloaded pow() call and rounds the result to the nearest integer.
//A plain static_cast truncates, so a pow() result like 124.999...
//(possible for e.g. 5^3 on some platforms) would silently come back
//as 124 and corrupt the composite-size tables built from this.
//@param a base, @param b exponent, @return round(a^b) as int
int int_pow(double a, double b)
{
  return static_cast<int>(llround(pow(a,b)));
}



//constructor taking per-dimension range and precision vectors;
//all three vectors must describe the same number of variables
DataSet::DataSet(const vector<double>& min, const vector<double>& max,
		 const vector<double>& precision)
{
  const bool sizes_match =
    (min.size() == max.size()) && (min.size() == precision.size());

  if(!sizes_match)
    throw std::runtime_error
 ("Trying to initialize a DataSet with different sizes of vectors as input");

  //hand everything over to the common initializer
  Initialize(min, max, precision);
}

//constructor taking raw arrays; precision_ may be NULL, in which case
//Initialize falls back to a default bin width of 0.01
DataSet::DataSet(int nvar, double* min_, double* max_,  double* precision_)
{
  Initialize(nvar, min_, max_, precision_);
}

//common initializer: set up per-dimension ranges, bin counts, bin widths,
//the covariance accumulator and the (empty) data grids.
//@param nvar       number of variables (must be > 0)
//@param min_/max_  per-dimension range (max_[i] must exceed min_[i])
//@param precision_ requested bin width per dimension; NULL selects a
//                  default width of 0.01 everywhere
void DataSet::Initialize(int nvar, const double* min_, 
			 const double* max_, const double* precision_)
{
  //make sure inputs are well defined
  assert(nvar>0);
  assert(min_!=NULL);
  assert(max_!=NULL);
  
  co=Covariance(nvar);
  fix_eff_n=-1.0;   //negative: no externally fixed effective N
  min.resize(nvar);
  max.resize(nvar);
  one_over_width.resize(nvar);
  width.resize(nvar);
  nbins.resize(nvar);
  precision.resize(nvar);

  for(int i=0; i<nvar; i++)
    {
      //max must be greater than min
      assert(max_[i]>min_[i]);

      //set min and max values
      min[i]=min_[i];
      max[i]=max_[i];

      //set the number of bins (default precision 0.01 if none given)
      if(precision_==NULL)
	{
	  nbins[i]=ceil((max[i]-min[i])/0.01);
	  precision[i]=0.01;
	}
      else
	{
	  assert(precision_[i]>0);
	  nbins[i]=ceil((max[i]-min[i])/precision_[i]);
	  precision[i]=precision_[i];
	}
      
      //the size of each dimension must be greater than zero
      assert(nbins[i]>0);

      //now we may compute the width
      //(the inverse is cached to avoid divisions in hot code)
      one_over_width[i]=static_cast<double>(nbins[i])/(max[i]-min[i]);
      width[i]=(max[i]-min[i])/static_cast<double>(nbins[i]);
    }

  //initialize the data grids: data holds summed weights,
  //data2 holds summed squared weights (for variance estimates)
  data=Tensor<double>(nbins);
  data2=Tensor<double>(nbins);
}

//vector-based overload: validates the sizes and forwards to the
//pointer-based Initialize so the binning logic lives in one place.
//Precision is always supplied here, so the 0.01 default never applies.
void DataSet::Initialize(const vector<double>& min_, 
			 const vector<double>& max_, 
			 const vector<double>& precision_)
{
  //make sure inputs are well defined
  assert(!min_.empty());
  assert(min_.size()==max_.size());
  assert(min_.size()==precision_.size());

  //delegate to the common pointer-based initializer
  Initialize(static_cast<int>(min_.size()), &min_[0], &max_[0], &precision_[0]);
}


void DataSet::SetN(double eff_n)
{
  fix_eff_n=eff_n;
}

void DataSet::Fill(const vector<double>& ntuple, double weight)
{
  //fill covariance matrix for bandwidth computation
  co.Fill(ntuple, weight);
  
  //increment the given bin
  vector<int> bin=GetBin(ntuple);

  data[bin]+=weight;
  data2[bin]+= (weight*weight);
  
}

//Compute a kernel-density-estimate PDF from the binned data using FFT
//convolution.  Fills pdf.data (the estimate), pdf.data_var (variance),
//pdf.data_bias (bias estimate from double-smoothing) and pdf.data_err.
//@param timer_mode  when true, prints timing and diagnostic output
//@return the computed PDF (pdf.valid set to true on success)
//@throws std::runtime_error if the dataset integral is zero
PDF DataSet::ComputePDF(bool timer_mode)
{
  //We proceed in the following way
  //1. bin the kernel and data
  //2. add padding zeroes to prevent looping
  //3. proceed to fftw routines (fourier transform)
  //4. do convolutions and what not
  //5. estimate errors via convolution
  

  ///////////////////////////////////
  /// Initialize PDF
  ///////////////////////////////////

  //first compute the bandwidth matrix
  //smoothing_size = volume of one grid cell (product of bin widths)
  double smoothing_size=1.0;
  for(unsigned int i=0; i<width.size(); ++i)
    smoothing_size*=width[i];

  co.ComputeBandwidth(smoothing_size, kernel_smoothing_factor, fix_eff_n);

  //factor to undo the extra smoothing
  //used for bias computation
  //NOTE(review): currently unused — the oversmoothed distance below is
  //left equal to dist2 (see the commented-out factors there)
  double undo_extra_factor=pow(kernel_smoothing_factor, -2.0/(co.rank+4));

  if(timer_mode)
    for(unsigned int i=0; i<width.size(); ++i)
      {
	//output the width
	cout<<"width size: "<<width[i]<<endl;
	//output the smoothing size
	cout<<"smoothing size: "
	    <<1.0/sqrt(co.bandwidth_inverse[i*Rank()+i])<<endl;
      }
  
  ///////////////////////////////////////////////////
  //// Computing Kernel+padding and initialize PDF
  ///////////////////////////////////////////////////
  
  //next we get a grid representing the kernel function
  //we do not allow smoothing size that is too big
  //i.e. across half of the pdf
  //(data.length/4 per dimension; reflections double this to half)
  Tensor<double> kernel(data.length/4);

  //over-smoothed kernel for computing bias
  Tensor<double> kernel_oversmooth(data.length/4);

  //we'll also need kenerl^2 to compute variance
  Tensor<double> kernel2(data.length/4);
  
  //we will only need the kernel in the positive "cube"
  //i.e. all coordinates are >= 0
  //the fftw will take care of symmetric reflections
  //we also keep a list of maximum smoothing distance
  //this will be useful for adding padding zeroes
  vector<int> max_size(data.rank);
  
  //initial point
  //bin center at bin 0
  vector<double> initial_pt = GetBinCenter(vector<int>(data.rank));  



  //now proceed to construct the kernel function
  //smoothing_num counts the bins with non-negligible kernel weight
  int smoothing_num=0;
  for(unsigned int i=0; i<kernel.size(); ++i)
    { 
      //get the n dimensional representation of the bin
      vector<int> index = kernel.IndexVec(i);
      
      vector<double> dist = 
	(GetBinCenter(index) - initial_pt);

      //get actual 1/bandwidth^2*distance^2
      //before exponentiating
      double dist2=co.BandwidthNorm(dist);
      
      //over-smoothed distance for computing bias
      //double dist2_oversmooth=dist2 ; //*co.second_derivative_factor;
      double dist2_oversmooth=dist2 ;
      //*co.second_derivative_factor
      //*undo_extra_factor;


      //don't do anything if further away than 3 sigma
      //NOTE(review): the cut of 12 corresponds to ~3.46 sigma
      //(dist2 > 9 would be exactly 3 sigma) — confirm intent
      if(dist2_oversmooth > 12.) continue;
      
      //find max_grid_distance
      for(unsigned int j=0; j<index.size(); ++j)
	if(index[j]>max_size[j])
	  max_size[j]=index[j];
      
      //since this bin has a non-zero smoothing
      //store the amount of smoothing
      kernel[i]= exp(-0.5*dist2);
      kernel_oversmooth[i]= exp(-0.5*dist2_oversmooth);
      kernel2[i]= kernel[i]*kernel[i];
      ++smoothing_num;
    }

  //initialize a PDF
  PDF pdf;

  //since we have the size of the smoothing grid
  //get the minimum error
  //NOTE(review): 1.833 is a magic constant — presumably a statistical
  //factor for the minimum resolvable error; document its derivation
  double min_err=1.833*co.sum2/(co.sum*co.sum*smoothing_num);
  
  //DEBUG
  if(timer_mode)
    cout<<"Minimum PDF error (make sure it's small): "<<min_err<<endl;
    
  pdf.Initialize(data.rank, &min[0], &max[0], &precision[0], min_err);
  //scratch grids for the oversmoothed and doubly-smoothed estimates
  //(used for the bias computation below)
  Tensor<double> pdf_oversmooth(data.length);
  Tensor<double> pdf_doublesmooth(data.length);
  /*
  //initialize the grid in PDF
  //number of bins, dimensions info...etc
  pdf.data= Tensor(data.length);
  
  //this stores the bias^2 of Kernel smoothing
  pdf.data_bias= Tensor(data.length);

  //this stores the variance
  //though will need proper normalization
  pdf.data_var= Tensor(data.length);

  //and finally the error
  //error = sqrt(bias^2 + variance);
  pdf.data_err= Tensor(data.length);
  */

  //initialize timer to see how much time is taken
  clock_t start=clock();
  clock_t diff;

  //now we need to determine padding sizes 
  //to carry out fftw
  //large composite numbers are fastests, so things like
  //2^n, or 2^n*3^m where n,m are large are ideal
  int max_length= maximum(data.length);
  max_length=log2(max_length);
  
  vector<int> list_of_sizes;
  list_of_sizes.reserve(100);
  //generate a list of good composite numbers
  //using small primes
  //we also demand it to be even
  //for easier manipulation
  for(int i=0; i<max_length; ++i)
  for(int j=0; j<max_length; ++j)
  for(int k=0; k<max_length; ++k)
    list_of_sizes.push_back(int_pow(2,i+1)*int_pow(3,j)*int_pow(5,k));

  //get the list
  sort(list_of_sizes.begin(), list_of_sizes.end());
  
  //now we'll figure out all the sizes
  //padded size must cover data plus kernel reach on both sides
  vector<int> sizes= data.length + max_size*2;
  vector<int> kernel_sizes(data.rank);

  //round each padded size up to the next good composite number;
  //kernel_sizes is the half-size+1 grid used by the r2r transform
  for(unsigned int i=0; i<sizes.size(); ++i)
    for(unsigned int j=0; j<list_of_sizes.size(); ++j)
      if(list_of_sizes[j] > sizes[i])
	{
	  //we've found a good size!
	  sizes[i]=list_of_sizes[j];
	  kernel_sizes[i]=list_of_sizes[j]/2+1;
	  break;
	}

  //now get the padded sizes for data and kernel
  //and helper array to convert 1d index to multi-d index
  int data_padded_size=1;
  vector<int> data_padded_helper(data.rank);

  int data_complex_size=1;
  vector<int> data_complex_helper(data.rank);
  
  int kernel_padded_size=1;
  vector<int> kernel_padded_helper(data.rank);
  
  //build row-major stride helpers (last dimension fastest);
  //the complex array's last dimension is n/2+1 (r2c storage layout)
  for(int i=data.rank-1; i>=0; --i)
    {
      data_padded_helper[i]=data_padded_size;
      data_complex_helper[i]=data_complex_size;
      kernel_padded_helper[i]=kernel_padded_size;
      data_padded_size*= sizes[i];

      if(i==data.rank-1)
	data_complex_size*= (sizes[i]/2+1);
      else
	data_complex_size*= sizes[i];

      kernel_padded_size*= kernel_sizes[i];
    }

  ///////////////////////////////////
  /// Initialize FFTW
  ///////////////////////////////////

  //now initialize fftw routines
  fftw_complex* out_data_complex;
  double* in_data;
  double* in_kernel, *out_kernel;

  //we need to use fftw routines
  //to allocate memeory for different arrays
  //(fftw_malloc guarantees the alignment fftw needs)

  out_data_complex= (fftw_complex*) 
    fftw_malloc(sizeof(fftw_complex)* data_complex_size);

  in_data= (double*) 
    fftw_malloc(sizeof(double)* data_padded_size);

  in_kernel = (double *)
    fftw_malloc(sizeof(double)* kernel_padded_size);

  out_kernel = (double *)
    fftw_malloc(sizeof(double)* kernel_padded_size);

  //now we want to set all the arrays to zero first
  memset(out_data_complex, 0, sizeof(fftw_complex)* data_complex_size);
  memset(in_data, 0, sizeof(double)* data_padded_size);
  memset(in_kernel, 0, sizeof(double)* kernel_padded_size);
  memset(out_kernel, 0, sizeof(double)* kernel_padded_size);
  
  //initialize plans for doing fftw
  fftw_plan fftw_data, fftw_kernel, fftw_final;
  
  // plans for fourier transforming data
  fftw_data= fftw_plan_dft_r2c(data.rank, &sizes[0],
			       in_data, out_data_complex, FFTW_ESTIMATE);

  // plans for fourier transforming kernel
  // the plans is different since we are exploiting
  // specify boundary conditions 
  // i.e. even for all reflections
  // (FFTW_REDFT00 = DCT-I, even symmetry at both boundaries)
  vector<fftw_r2r_kind> kind(data.rank, FFTW_REDFT00);
  fftw_kernel= fftw_plan_r2r(data.rank, &kernel_sizes[0],
			     in_kernel, out_kernel,
			     &kind[0], FFTW_ESTIMATE);

  // plans to fourier transforming back the convolution
  fftw_final= fftw_plan_dft_c2r(data.rank, &sizes[0],
				out_data_complex, in_data, FFTW_ESTIMATE);



  //fftw initialization for bias
  //need to declare new variables for doubly-smoothed distribution
  double* in_data_double_kernel;
  in_data_double_kernel = (double *)
    fftw_malloc(sizeof(double)* data_padded_size);
  
  fftw_complex* out_data_double_kernel;
  out_data_double_kernel= (fftw_complex*) 
    fftw_malloc(sizeof(fftw_complex)* data_complex_size);
  
  memset(in_data_double_kernel, 0, sizeof(double)* data_padded_size);
  memset(out_data_double_kernel, 0, sizeof(fftw_complex)* data_complex_size);
  
  // plans to fourier transforming back the convolution
  fftw_plan fftw_final_double_kernel;
  fftw_final_double_kernel = fftw_plan_dft_c2r
    (data.rank, &sizes[0], out_data_double_kernel, in_data_double_kernel,
     FFTW_ESTIMATE);



  ///////////////////////////////////
  /// Initialize data input
  ///////////////////////////////////
  
  // populate data to the input to fftw include padding
  for(unsigned int i=0; i<data.size(); i++)
    {
      //get the multidimensional index
      vector<int> index= data.IndexVec(i);
      
      //populate the data input
      //(index*helper is a dot product giving the flat padded index)
      in_data[ (index*data_padded_helper) ] = data[i];
      //the rest is zero
    }

  // populate kernel to the input to fftw include padding
  for(unsigned int i=0; i<kernel.size(); i++)
    if(kernel[i]>0)
      {
	//get the multidimensional index
	vector<int> index= kernel.IndexVec(i);
	
	//populate the kernel input
	in_kernel[ (index*kernel_padded_helper) ] = kernel[i];
	
	//the rest is zero
      }

  //see how much time it has taken
  if(timer_mode)
    {
      diff=clock();
      cout<<"ComputePDF FFTW Initialization done..."
	  <<Time(diff-start)<<" elapsed"<<endl;
    }

  //perform fourier transform for data and kernel
  fftw_execute(fftw_data);
  fftw_execute(fftw_kernel);

  //compute fourier transform of convolution
  //NOTE(review): data_complex_size is int compared against an unsigned
  //loop index here and below — harmless while sizes stay positive
  for(unsigned int i=0; i<data_complex_size; ++i)
    {
      //since out_kernel have different dimensions
      //when comparing to out_complex
      //we need to figure out what is multiplying what
      vector<int> index_vec(data.rank);
      int i_temp=i;
      for(unsigned int j=0; j<data.rank; ++j)
	{
	  index_vec[j]=i_temp/data_complex_helper[j];
	  //now if the index is too large
	  //wrap around
	  //(kernel spectrum is symmetric, so fold the high frequencies)
	  if(index_vec[j] >= kernel_sizes[j])
	    {
	      index_vec[j] = sizes[j] - index_vec[j];
	    }

	  i_temp%=data_complex_helper[j];
	}
      
      //now we know which kernel coefficient to multiply
      int index_kernel= (index_vec*kernel_padded_helper);

      //multiply both real and complex part
      //(r2r kernel transform is purely real, so this is a real scale)
      out_data_complex[i][0]*=out_kernel[index_kernel];
      out_data_complex[i][1]*=out_kernel[index_kernel];

      //keep the values in out_data_double_kernel
      //(saved for the doubly-smoothed bias estimate later)
      out_data_double_kernel[i][0] = out_data_complex[i][0];
      out_data_double_kernel[i][1] = out_data_complex[i][1];
    }

  //convolute back
  fftw_execute(fftw_final);

  //populate the output
  for(unsigned int i=0; i<data.size(); i++)
    {
      //get the multidimensional index
      vector<int> index= data.IndexVec(i);
      
      //populate the data input
      pdf.data[i] = in_data[ (index*data_padded_helper) ];      

      //it may happen that the pdf result is negative
      //due to rounding errors
      //reset to zero
      if(pdf.data[i] < 0.)
	pdf.data[i]=0;
    }

  //see how much time it has taken
  if(timer_mode)
    {
      diff=clock();
      cout<<"PDF Convolution done..."
	  <<Time(diff-start)<<" elapsed"<<endl;
    }
    
  //now rescale the pdf
  //get a norm and compute integral
  //double sum=summation(pdf.data.ary);
  double sum=summation(pdf.data.ary);

  //get volume of the base space
  double volume=1.0;
  for(unsigned int i=0; i<width.size(); i++)
    volume*= width[i];

  if(sum <= 0)
    throw std::runtime_error
	("PDF total integral is identically 0, please make sure the dataset is non-empty");
  

  //approximate integral normalization
  double scale= 1.0/(sum*volume);
  
  //scale 2 is used to normalize variance
  //however an extra factor of N is included
  //since fftw does not include a normalization N
  double scale2= (data_padded_size*scale*scale);

  //we now perform convolution for data2
  //to obtain the variance

  ///////////////////////////////////
  /// Initialize variance input
  ///////////////////////////////////
  
  //reuse the same fftw buffers: zero them and refill with data2/kernel2
  memset(out_data_complex, 0, sizeof(fftw_complex)* data_complex_size);
  memset(in_data, 0, sizeof(double)* data_padded_size);
  memset(in_kernel, 0, sizeof(double)* kernel_padded_size);
  memset(out_kernel, 0, sizeof(double)* kernel_padded_size);

  // populate data2 to the input to fftw include padding
  for(unsigned int i=0; i<data2.size(); i++)
    {
      //get the multidimensional index
      vector<int> index= data2.IndexVec(i);
      
      //populate the data input
      in_data[ (index*data_padded_helper) ] = data2[i];
      //the rest is zero
    }

  // populate kernel2 to the input to fftw include padding
  for(unsigned int i=0; i<kernel2.size(); i++)
    if(kernel2[i]>0)
      {
	//get the multidimensional index
	vector<int> index= kernel2.IndexVec(i);
	
	//populate the kernel input
	in_kernel[ (index*kernel_padded_helper) ] = kernel2[i];
	
	//the rest is zero
      }

  //perform fourier transform for data2 and kernel2
  //(the existing plans can be reused since the buffers are the same)
  fftw_execute(fftw_data);
  fftw_execute(fftw_kernel);
  
  //compute fourier transform of convolution
  for(unsigned int i=0; i<data_complex_size; ++i)
    {
      //since out_kernel have different dimensions
      //when comparing to out_complex
      //we need to figure out what is multiplying what
      vector<int> index_vec(data.rank);
      int i_temp=i;
      for(unsigned int j=0; j<data.rank; ++j)
	{
	  index_vec[j]=i_temp/data_complex_helper[j];
	  //now if the index is too large
	  //wrap around
	  if(index_vec[j] >= kernel_sizes[j])
	    {
	      index_vec[j] = sizes[j] - index_vec[j];
	    }

	  i_temp%=data_complex_helper[j];
	}
      
      //now we know which kernel coefficient to multiply
      int index_kernel= (index_vec*kernel_padded_helper);

      //multiply both real and complex part
      out_data_complex[i][0]*=out_kernel[index_kernel];
      out_data_complex[i][1]*=out_kernel[index_kernel];
    }

  //convolute back
  fftw_execute(fftw_final);

  //populate the output
  for(unsigned int i=0; i<data2.size(); i++)
    {
      //get the multidimensional index
      vector<int> index= data2.IndexVec(i);
      
      //populate the data input
      pdf.data_var[i] = in_data[ (index*data_padded_helper) ];      

      //it may happen that the pdf result is negative
      //due to rounding errors
      //reset to zero
      if(pdf.data_var[i] < 0.)
	pdf.data_var[i]=0;
    }


  //see how much time it has taken
  if(timer_mode)
    {
      diff=clock();
      cout<<"Variance Convolution done..."
	  <<Time(diff-start)<<" elapsed"<<endl;
    }


  //now rescale the pdf and compute full errors
  //valarry does vector multiply
  pdf.data.ary*=scale;
  
  //need to rescale variance
  pdf.data_var.ary*= scale2;

  ////////////////////////////
  /// bias computation
  ////////////////////////////

  //now we want to set all the arrays to zero first
  memset(out_data_complex, 0, sizeof(fftw_complex)* data_complex_size);
  memset(in_data, 0, sizeof(double)* data_padded_size);
  memset(in_kernel, 0, sizeof(double)* kernel_padded_size);
  memset(out_kernel, 0, sizeof(double)* kernel_padded_size);
  
  //now we need to estimate the bias
  //first we obtain another smoothed dataset with a larger bandwidth
  // populate data to the input to fftw include padding
  for(unsigned int i=0; i<data.size(); i++)
    {
      //get the multidimensional index
      vector<int> index= data.IndexVec(i);
      
      //populate the data input
      in_data[ (index*data_padded_helper) ] = data[i];
      //the rest is zero
    }

  // populate kernel to the input to fftw include padding
  for(unsigned int i=0; i<kernel_oversmooth.size(); i++)
    if(kernel_oversmooth[i]>0)
      {
	//get the multidimensional index
	vector<int> index= kernel_oversmooth.IndexVec(i);
	
	//populate the kernel input
	in_kernel[ (index*kernel_padded_helper) ] = kernel_oversmooth[i];
	
	//the rest is zero
      }

  //perform fourier transform for data and kernel
  fftw_execute(fftw_data);
  fftw_execute(fftw_kernel);

  //compute fourier transform of convolution
  for(unsigned int i=0; i<data_complex_size; ++i)
    {
      //since out_kernel have different dimensions
      //when comparing to out_complex
      //we need to figure out what is multiplying what
      vector<int> index_vec(data.rank);
      int i_temp=i;
      for(unsigned int j=0; j<data.rank; ++j)
	{
	  index_vec[j]=i_temp/data_complex_helper[j];
	  //now if the index is too large
	  //wrap around
	  if(index_vec[j] >= kernel_sizes[j])
	    {
	      index_vec[j] = sizes[j] - index_vec[j];
	    }

	  i_temp%=data_complex_helper[j];
	}
      
      //now we know which kernel coefficient to multiply
      int index_kernel= (index_vec*kernel_padded_helper);

      //multiply both real and complex part
      out_data_complex[i][0]*=out_kernel[index_kernel];
      out_data_complex[i][1]*=out_kernel[index_kernel];

      //now get the doubly smoothed pdf
      //(out_data_double_kernel already holds data*kernel from the first
      //pass, so one more multiply gives data smoothed twice)
      out_data_double_kernel[i][0]*=out_kernel[index_kernel];
      out_data_double_kernel[i][1]*=out_kernel[index_kernel];
    }

  //convolute back
  fftw_execute(fftw_final);

  //also for the double kernel
  fftw_execute(fftw_final_double_kernel);
  
  //populate the over_smoothed output
  for(unsigned int i=0; i<pdf_oversmooth.size(); i++)
    {
      //get the multidimensional index
      vector<int> index= data.IndexVec(i);
      
      //populate the data input
      int temp_index=(index*data_padded_helper);
      pdf_oversmooth[i] = in_data[ temp_index ];      
      pdf_doublesmooth[i] = in_data_double_kernel[ temp_index ];

      //it may happen that the pdf result is negative
      //due to rounding errors
      //reset to zero
      if(pdf_oversmooth[i] < 0.)
	pdf_oversmooth[i]=0;

      if(pdf_doublesmooth[i] < 0.)
	pdf_doublesmooth[i]=0;
    }


  //scale the pdf to integrate to one
  double scale_oversmooth =
    1.0/(summation(pdf_oversmooth.ary)*volume);
  pdf_oversmooth.ary*=scale_oversmooth;

  double scale_doublesmooth = 
    1.0/(summation(pdf_doublesmooth.ary)*volume);
  pdf_doublesmooth.ary*=scale_doublesmooth;

  //bias will be obtained from the difference of the two;
  pdf.data_bias.ary= pdf_doublesmooth.ary - pdf_oversmooth.ary;

  ///////////////////////////////////////////
  //// computation of bias through laplacian
  //////////

  /*  
  
  //bias will be obtained from the laplacian
  //i.e. bias = - sum of  Hij  grad_i e_i /2
  // we estimate this by 
  // 2*trace(H)*pdf -  sum_direction Hii (ary[h+i] + ary[h-i])
  // (boundaries ary values will be assumed zero)
  
  //first grab the needed variables
  vector<double> hii(data.rank);
  vector<int> delta_index(data.rank);
  double trace_H=0;

  for(int i=0; i<data.rank; ++i)
    {
      hii[i] = 0.5*co.bandwidth[i*data.rank+i]*
	one_over_width[i]*one_over_width[i];
      delta_index[i] = data.index_helper[i];
      trace_H += hii[i];
    }

  
  //first set bias = 2*trace* pdf
  //pdf.data_bias.ary = pdf.data.ary;
  pdf.data_bias.ary = pdf_oversmooth.ary;
  pdf.data_bias.ary *= (2.0*trace_H);
  
  //now for the sake of code safety
  //we demand that the number of bins in each dimension must
  //be at least 3, so no segmentation fault
  for(unsigned int i=0; i<data.length.size(); ++i)
    {
      if(data.length[i]<3)
	throw std::runtime_error
	  ("Cannot obtain second derivative estimates in PDF bias when the length of dataset is less than 3");
    }


  //now go through the bias
  for(unsigned int i=0; i<pdf.data_bias.size(); ++i)
    {
      //get the multidimensional index
      vector<int> index = data.IndexVec(i);

      //loop over the direction
      for(unsigned int j=0; j<index.size(); ++j)
	{
	  //if it is an endpoint, take the 2nd derivate
	  //in neighboring bin
	  if(index[j] == data.length[j]-1)
	    pdf.data_bias[i]-= (hii[j]*
				(pdf_oversmooth[i-2*delta_index[j]] -
				 2*pdf_oversmooth[i-delta_index[j]] +
				 3*pdf_oversmooth[i]
				 ));

	  else if(index[j] == 0)
	    pdf.data_bias[i]-= (hii[j]*
				(pdf_oversmooth[i+2*delta_index[j]] -
				 2*pdf_oversmooth[i+delta_index[j]] +
				 3*pdf_oversmooth[i]
				 ));
	  
	  else
	    pdf.data_bias[i]-= (hii[j]*
				(pdf_oversmooth[i-delta_index[j]] +
				 pdf_oversmooth[i+delta_index[j]]
				 ));
	}
    }
  
  
  //end laplacian bias computation
  */


  //only include bias if flag is enabled
  if(b_bias_only)
    pdf.data_err.ary = pdf.data_bias.ary;

  //we can get the true error
  //error = sqrt(variance + bias^2)
  else
    for(unsigned int i=0; i<pdf.data_err.size(); ++i)
      pdf.data_err.ary[i] = sqrt(pdf.data_var.ary[i] + 
				 pdf.data_bias.ary[i]*pdf.data_bias.ary[i]);


  //Finally, our pdf is computed!
  //set status to valid
  pdf.valid=true;  

  
  //clean up after ourselves
  fftw_destroy_plan(fftw_data);
  fftw_destroy_plan(fftw_kernel);
  fftw_destroy_plan(fftw_final);
  fftw_destroy_plan(fftw_final_double_kernel);

  fftw_free(out_data_complex);
  fftw_free(in_data);
  fftw_free(in_kernel);
  fftw_free(out_kernel);
  fftw_free(in_data_double_kernel);
  fftw_free(out_data_double_kernel);

  //see how much time it has taken
  if(timer_mode)
    {
      diff=clock();
      cout<<"PDF computed ! ...total "
	  <<Time(diff-start)<<" elapsed"<<endl;
    }


  return pdf;
}

//Generate new DataSet based on fluctuation
//input = scale factor 
//scale_factor <= 0: each bin is fluctuated via an effective-N Poisson
//  (eff_n = data^2/data2) and rescaled back, preserving per-bin weights
//scale_factor  > 0: bin contents are scaled by the factor and fluctuated
//  as plain Poisson counts; covariance is renormalized to the new total
//NOTE: the RNG call sequence here must not be reordered —
//reproducibility depends on it
DataSet DataSet::GenerateDataSet(double scale_factor) const
{
  //first declare a new DataSet
  //using copy constructor
  DataSet result(*this);

  double eff_n_total=0;

  //we now modify all the values in the dataset
  //the data2 remains the same however
  
  for(unsigned int i=0; i<data.size(); ++i)
    if(data[i]>0)
    {
      //grab a normal random number
      //double random_normal=Random_Gaus()*sqrt(data2[i])/data[i];
      
      //smear the result
      //result.data[i]*=exp(random_normal);


      //grab a poisson random number
      //NOTE(review): assumes data2[i]>0 whenever data[i]>0
      //(holds for positive weights — confirm if negative weights allowed)
      if(scale_factor<=0)
	{
	  double eff_n=data[i]*data[i]/data2[i];
	  double random_n=Random_Poisson(eff_n);
	  double random_poisson=(data2[i]/data[i])*random_n;
	  result.data[i]=random_poisson;
	  result.data2[i]=(data2[i]/data[i])*random_poisson;
	}

     //if scale up to lhc result
      else
	{
	  double eff_n=data[i]*scale_factor;
	  double random_poisson=Random_Poisson(eff_n);
	  result.data[i]=random_poisson;
	  result.data2[i]=random_poisson;	  
	  eff_n_total+=random_poisson;
	}

    }

  //need to normalize covariance to eff_n
  if(scale_factor > 0)
    result.SetN(eff_n_total);

  return result;
}

//Generate new DataSet based on fluctuation
//input = scale factor 
//Generate a new DataSet by resampling the binned entries with
//replacement (bootstrap with fixed total count).
//
//The histogram is flattened into one int per unit of bin content
//(the int being the bin index); all_data.size() entries are then drawn
//uniformly with replacement and counted back into the result.
//Returns the resampled DataSet, with the covariance renormalized to
//the total entry count via SetN().
DataSet DataSet::GenerateFixDataSet() const
{
  //first declare a new DataSet
  //using copy constructor
  DataSet result(*this);

  //NOTE(review): result starts as a copy, so the resampled counts below
  //are ADDED on top of the original bin contents rather than replacing
  //them — confirm this is the intended behavior, since SetN() at the end
  //normalizes to all_data.size() only.

  //now get a gigantic vector storing all the data filling the bins
  vector<int> all_data;
  all_data.reserve((int)co.sum);

  //unsigned index: valarray/vector size() is unsigned
  for(unsigned int i=0; i<data.ary.size(); ++i)
    if(data.ary[i]>0)
    {
      //one entry per unit of content, each recording its bin index
      vector<int> temp_data((int)data.ary[i], i);
      all_data.insert(all_data.end(), temp_data.begin(), temp_data.end());
    }


  //draw all_data.size() entries with replacement
  for(unsigned int i=0; i<all_data.size(); ++i)
    {
      //BUG FIX: the previous expression
      //  temp_random = (int) irand.rand_double()*(all_data.size()+1)-1;
      //cast rand_double() (uniform in [0,1)) to int BEFORE multiplying,
      //so it always evaluated to -1 and the while loop never terminated.
      //Draw a uniform index in [0, all_data.size()); the rejection loop
      //is kept as a guard in case rand_double() can return exactly 1.0.
      int temp_random=-1;
      while(temp_random<0 || temp_random >= (int)all_data.size())
	temp_random= (int)(irand.rand_double()*all_data.size());

      ++result.data[all_data[temp_random]];
      ++result.data2[all_data[temp_random]];
    }

  //need to normalize covariance to eff_n
  result.SetN(all_data.size());

  return result;
}



//Compute the kernel-density PDF by direct (real-space) Gaussian
//smoothing, without FFTs.
//
//Outline:
//  1. derive the kernel bandwidth from the dataset covariance;
//  2. precompute the smoothing neighborhood (all grid offsets within
//     3 sigma of the grid midpoint) and their Gaussian weights;
//  3. for every output bin, accumulate weighted data (density) and
//     weight^2 * data2 (variance);
//  4. normalize the density to unit integral;
//  5. estimate the leading smoothing bias from a discrete Laplacian
//     of the smoothed density;
//  6. combine variance and bias into data_err.
//
//timer_mode : if true, print progress/timing diagnostics to cout.
//Returns a PDF object with data, data_var, data_bias, data_err filled
//and valid set to true.
//Throws std::runtime_error if any dimension has fewer than 3 bins
//(the boundary second-derivative stencils need 3 points).
PDF DataSet::ComputePDF_DirectSmoothing(bool timer_mode)
{
  //DEBUG
  //cout<<"Starting PDF Computation..."<<endl;

  //first find minimum size
  //(the product of bin widths = volume of one grid cell; used as the
  //floor for the bandwidth computation)
  double smoothing_size=1.0;
  for(unsigned int i=0; i<width.size(); ++i)
    smoothing_size*=width[i];

  //first compute the bandwidth
  //from covariance matrix
  co.ComputeBandwidth(smoothing_size, kernel_smoothing_factor);

  //identify the middle of the whole grid
  //we want to identify the area for smoothing
  //vector/2 means vector division
  vector<int> midpoint= (data.length/2);
  int midpoint_index= data.Index(midpoint);

  //relative index keeps track of what bins
  //in the grids are relevant
  //weight keeps track of the smoothing weight
  //(relative_index stores flat-index OFFSETS from the midpoint;
  //relative_index_dist2 stores the bandwidth-normalized distance^2)
  vector<int> relative_index;
  vector<double> relative_index_weight;
  vector<double> relative_index_dist2;
  
  relative_index.reserve(100);
  relative_index_weight.reserve(100);
  relative_index_dist2.reserve(100);

  //now compute exp^( 0.5*1/bandwidth^2* dist*dist )
  //normalization will be carried out when taking slices of PDF
  //no reason to normalize now
  
  //DEBUG
  //cout<<"Determining smoothing grid..."<<endl;

  //keep track of furthest neighbor that needs smoothing
  //(in grid steps; used later to reject wrap-around neighbors)
  int max_grid_dist=0;
  for(unsigned int i=0; i<data.size(); i++)
    {
      //get the distance vector
      //current vec - midpoint

      //DEBUG
      /*
      vector<int> int_index=data.IndexVec(i);
      cout<<"Index: "<<int_index[0]<<endl;
      cout<<"Midpoint: "<<midpoint[0]<<endl;
      cout<<"BinCenter first: "<<GetBinCenter(int_index)[0]<<endl;
      cout<<"BinCenter latter: "<<GetBinCenter(midpoint)[0]<<endl;
      */

      vector<double> dist = 
	GetBinCenter(data.IndexVec(i)) - GetBinCenter(midpoint);

      //get actual 1/bandwidth^2*distance^2
      //before exponentiating
      double dist2=co.BandwidthNorm(dist);
      
      //don't do anything if further away than 3 sigma
      //(exp(-0.5*9) ~ 1.1e-2 contribution cut-off)
      if(dist2 > 9.) continue;
      

      //find max_grid_distance
      int current_grid_dist= data.GridDistance(i, midpoint_index);
      if(current_grid_dist > max_grid_dist)
	max_grid_dist = current_grid_dist;

      //since this bin has a non-zero smoothing
      //push it back
      relative_index.push_back(midpoint_index-i);
      relative_index_weight.push_back(exp(-0.5*dist2));
      relative_index_dist2.push_back(dist2);
      
    }


  //DEBUG
  //cout the weights;
  /*
  for(unsigned int i=0; i<relative_index.size(); i++)
    {
      cout<<relative_index[i]
	  <<", "<<relative_index_weight[i]
	  <<", "<<relative_index_dist2[i]<<endl;
    }
  */  

  //initialize a PDF first;
  PDF pdf;

  //since we have the size of the smoothing grid
  //get the minimum error
  //NOTE(review): 1.833 appears to be an empirical/statistical constant
  //for the minimum achievable relative error of the kernel estimate —
  //confirm its derivation before changing
  double min_err=1.833*co.sum2/(co.sum*co.sum*relative_index.size());
  
  //DEBUG
  if(timer_mode)
    cout<<"Minimum PDF error (make sure it's small): "<<min_err<<endl;
    
  pdf.Initialize(data.rank, &min[0], &max[0], &precision[0], min_err);


  //DEBUG
  //cout<<"Smoothing Size: "<<relative_index.size()<<endl;
  //cout<<"Computing Kernel Smoothing and error..."<<endl;
  
  //give a warning if computation time is expected to be large
  //(total work = output bins x neighborhood size)
  if(pdf.data.size() * relative_index.size() > 10000000000)
    {
      cout<<"WARNING: more than 10B bins of computation needed"<<endl;
      cout<<"WARNING: consider decreasing precision (increasing bin size)"
	  <<endl; 
    }
  
  //now do smoothing
  //initialize timer to see how much time is needed
  clock_t start=clock();
  clock_t diff;

  for(unsigned int i=0; i<pdf.data.size(); ++i)
    {

      //output timer info 
      //(every 100 bins early on, then every 5000)
      if(timer_mode)
      if( (i>1 && i<=1000 && (i-1)% 100 == 0) ||
	  (i>1 && (i-1)% 5000 == 0) )
	{
	  //estimated remaining time, extrapolated from progress so far;
	  //the double result is truncated back into clock_t
	  diff= static_cast<double>(clock()-start)*(pdf.data.size()-i)/(i);
	  
	  stringstream sout;
	  
	  cout<<"\r\033[K"<<floor((i-1)*100./pdf.data.size())
	      <<" % processed..."
	      << Time(diff)
	      <<flush;
	}


      //now we have to loop over all the valid neighbors
      for(unsigned int j=0; j<relative_index.size(); ++j)
	{
	  int grid_index=i+relative_index[j];

	  //make sure index is value
	  if(!pdf.data.ValidIndex(grid_index))
	    continue;

	  //make sure it's not beyond the boundary
	  //(flat-index offsets can wrap across dimension edges;
	  //GridDistance catches those cases)
	  if(pdf.data.GridDistance(i, grid_index) > max_grid_dist)
	    continue;

	  //sum of content in the bin, weighted by smoothing
	  double value_to_add=relative_index_weight[j]*data[grid_index];

	  //sum of content^2 in the bin, used to calculate variance
	  double value_to_add2=relative_index_weight[j]*relative_index_weight[j]*
	    data2[grid_index];

	  //now add up the bins
	  pdf.data[i]+=value_to_add;

	  //relative variance = sum weight^2 / (sum weight)^2
	  //when all weights are one and widths are thin
	  //this reduces to ~ 1/N (N approx = number of data points
	  //in a neighborhood
	  pdf.data_var[i]+= value_to_add2;


	  //get the bias
	  //bias =
	  //0.5*mu2*(-d + xT H x) exp(-0.5*dist2)*pdf_normalization
	  //where mu2 defined as
	  //int Kernel xi xj = mu2 * delta_{ij}
	  //for gaussian it's simply mu2=1
	  //we will take care of normalization later
	  
	  //*****
	  // OLD CODE :
	  // uses gaussian kernel smoothing
	  // bias too large when PDF is small
	  // not reliable, variance on the second
	  // derivative estimate too high

	  /*
	  pdf.data_bias[i]+=0.5*value_to_add*
	    (relative_index_dist2[j]-data.rank);
	  */

	  //also compute higher derivative terms
	  //that go like
	  // h^4/4! *
	  // 3(d^2 + 2d) <- 3d(d+2)
	  // -6x^2(d+2) <- -6 dist2 (d+2)
	  // + 3*x^4  <- 3 dist2 * dist2
	  //where x^2 = dist2

	  /*
	  double dp2=data.rank+2;

	  double term1= 3*data.rank*dp2;
	  double term2= -6*relative_index_dist2[j]*dp2;
	  double term3= 3*relative_index_dist2[j]*relative_index_dist2[j];

	  pdf.data_bias[i]+=(1/24.)*(term1+term2+term3)*value_to_add;
	  */
	  
	}
    }

  //get current time
  if(timer_mode)
    {
      diff=clock();
      cout<<"\r\033[K"<<"100% finished. "
	  <<Time(diff-start)<<" elapsed"<<endl;
    }


  //rescale bias and variance vector to get percentage bias
  /*
  for(unsigned int i=0; i<pdf.data_bias.size(); ++i)
    {
      if(pdf.data[i] > 1e-21)
	{
	  pdf.data_bias[i]/=pdf.data[i];
	  pdf.data_var[i]/= (pdf.data[i]*pdf.data[i]);
	}
      else
	{
	  pdf.data_bias[i]=0;
	  pdf.data_var[i]=0;
	}
    }
  */

  //now rescale the pdf
  //get a norm and compute integral
  double sum=summation(pdf.data.ary);

  //get volume of the base space
  //(volume of one grid cell, so sum*volume approximates the integral)
  double volume=1.0;
  for(unsigned int i=0; i<width.size(); i++)
    volume*= width[i];

  //approximate integral normalization
  double scale=1.0/(sum*volume);
  double scale2=scale*scale;

  //now rescale the pdf and compute full errors
  //valarry does vector multiply
  pdf.data.ary*=scale;
  
  //need to rescale variance
  //(variance scales with the square of the density scale)
  pdf.data_var.ary*=scale2;

  //now we need to estimate the bias
  //bias will be obtained from the laplacian
  //i.e. bias = sum of  Hij  grad_i e_i /2
  // we estimate this by 
  // -2*trace(H)*pdf +  sum_direction Hii (ary[h+i] + ary[h-i])
  // (boundaries ary values will be assumed zero)

  //first grab the needed variables
  //hii: half the bandwidth diagonal in bin units (grid-step^2);
  //delta_index: flat-index stride of one step along each dimension
  vector<double> hii(data.rank);
  vector<int> delta_index(data.rank);
  double trace_H=0;

  for(int i=0; i<data.rank; ++i)
    {
      hii[i] = 0.5*co.bandwidth[i*data.rank+i]*
	one_over_width[i]*one_over_width[i];
      delta_index[i] = data.index_helper[i];
      trace_H += hii[i];
    }

  
  //first set bias = 2*trace* pdf
  //(the -2*trace_H*pdf term of the stencil; neighbor terms added below)
  pdf.data_bias.ary = pdf.data.ary;
  pdf.data_bias.ary *= (-2.0*trace_H);
  
  //now for the sake of code safety
  //we demand that the number of bins in each dimension must
  //be at least 3, so no segmentation fault
  for(unsigned int i=0; i<data.length.size(); ++i)
    {
      if(data.length[i]<3)
	throw std::runtime_error
	  ("Cannot obtain second derivative estimates in PDF bias when the length of dataset is less than 3");
    }


  //now go through the bias
  for(unsigned int i=0; i<pdf.data_bias.size(); ++i)
    {
      //get the multidimensional index
      vector<int> index = data.IndexVec(i);

      //loop over the direction
      for(unsigned int j=0; j<index.size(); ++j)
	{
	  //if it is an endpoint, take the 2nd derivate
	  //in neighboring bin
	  //(3*pdf[i] instead of pdf[i]: the extra 2*pdf[i] cancels the
	  //-2*hii*pdf[i] already added via the trace term, leaving the
	  //one-sided stencil f[i-2] - 2 f[i-1] + f[i])
	  if(index[j] == data.length[j]-1)
	    pdf.data_bias[i]+= (hii[j]*
				(pdf.data[i-2*delta_index[j]] -
				 2*pdf.data[i-delta_index[j]] +
				 3*pdf.data[i]
				 ));

	  else if(index[j] == 0)
	    pdf.data_bias[i]+= (hii[j]*
				(pdf.data[i+2*delta_index[j]] -
				 2*pdf.data[i+delta_index[j]] +
				 3*pdf.data[i]
				 ));
	  
	  //interior: combined with the trace term this is the centered
	  //stencil hii*(f[i-1] - 2 f[i] + f[i+1])
	  else
	    pdf.data_bias[i]+= (hii[j]*
				(pdf.data[i-delta_index[j]] +
				 pdf.data[i+delta_index[j]]
				 ));
	}
    }

  //we can get the true error
  //(total error = sqrt(variance + bias^2))
  for(unsigned int i=0; i<pdf.data_err.size(); ++i)
    pdf.data_err.ary[i] = sqrt(pdf.data_var.ary[i] + 
			       pdf.data_bias.ary[i]*pdf.data_bias.ary[i]);

  
  //Finally, our pdf is computed!
  //set status to valid
  pdf.valid=true;  
  
  //DEBUG
  //cout<<"PDF successfully extracted!"<<endl;

  return pdf;
}


//Stream a human-readable dump of a DataSet: a size header followed by
//one bin content per line. Returns the stream for chaining.
ostream& operator<<(ostream& os, const DataSet& data)
{
  os<<endl<<"DataSet info: (size="<<data.data.size()<<")"<<endl;
  //unsigned index: size() is unsigned, so the old "int i" comparison
  //was a signed/unsigned mismatch
  for(unsigned int i=0; i<data.data.size(); i++)
    os<<data.data[i]<<endl;
  return os;
}