#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <valarray>
#include <numeric>
#include <stdexcept>
#include <ctime>

#include "TFile.h"
#include "TChain.h"
#include "TH1D.h"
#include "TH2D.h"
#include "TProfile2D.h"
#include "TF1.h"
#include "TF2.h"
#include "TF3.h"
#include "TLorentzVector.h"
#include "TGraphErrors.h"
#include "TGraph2DErrors.h"

#include "tensor.h"
#include "covariance.h"
#include "mtrand.h"
#include "dataset.h"
#include "pdf.h"
#include "vector_tools.h"
#include "make_list.h"

using namespace std;


// shorthand: valarray of ROOT doubles, used for MC sample buffers
typedef valarray<Double_t> valD;

//initialize random number generator
// (Mersenne-Twister seeded with wall-clock time; consumed by GenerateMC_1D)
MTRand_int32 irand(time(0));

//global variable to see if normal bootstrap is used
// (selects DataSet::GenerateFixDataSet over GenerateDataSet; set from argv)
bool normal_bootstrap=false;

const double PI=3.14159265359;
// bin width of the DataSet grid on [0,1]; may be overridden from argv[4]
double bin_width=0.0005;

//declare minimum pt cut
// NOTE(review): min_pt appears unused in this file — confirm before removing.
double min_pt=150.;

// Append an integer to a TString, returning a new string.
// (TString only provides operator+= for ints, so copy then append.)
TString operator+(const TString& a, int c)
{
  TString out(a);
  out += c;
  return out;
}

// Skewed test density on [0,1]: f(x) = C * x * (1-x)^4 / (2-x)^16,
// with C chosen so the density integrates to 1.
Double_t Skew(Double_t x)
{
  //return p0*x*(1-x)*exp(-p1*x*x*x+p2*x*x+p3*x);
  double num = 1 - x;
  num *= num;                    // (1-x)^2
  num *= num;                    // (1-x)^4
  double den = 2 - x;
  den *= den;                    // (2-x)^2
  den *= den;                    // (2-x)^4
  den *= den;                    // (2-x)^8
  den *= den;                    // (2-x)^16
  return 29087.290570499557 * x * num / den;
}

// Normalization constant for DoublePeak:
// 1 / integral_0^1 [x(1-x)]^2 (1.5 + cos 8x) dx
static const double norm=1.0/0.043727425015288;

// Two-peaked test density on [0,1]:
// f(x) = [x(1-x)]^2 * (1.5 + cos 8x) * norm
Double_t DoublePeak(Double_t x)
{
  const double envelope = x * (1 - x);
  return envelope * envelope * (1.5 + cos(8 * x)) * norm;
}


//find max of a function
// Scan pfunc on [0,1] for its maximum: a coarse pass (step 0.01) locates
// the peak region, a finer pass (step 0.001) around it refines the value.
// The result is inflated by 3% so it can safely serve as an accept/reject
// envelope.
double find_max(Double_t (*pfunc)(Double_t))
{
  double best = 0;
  double best_x = 0;

  // coarse pass: track both the value and where it occurred
  for (double x = 0; x <= 1.0; x += 0.01)
    {
      const double val = pfunc(x);
      if (val > best)
	{
	  best = val;
	  best_x = x;
	}
    }

  // fine pass around the coarse peak: only the value needs refining
  for (double x = best_x - 0.01; x < best_x + 0.01 && x <= 1.0; x += 0.001)
    {
      if (x <= 0)
	continue;
      const double val = pfunc(x);
      if (val > best)
	best = val;
    }

  // 3% headroom compensates for the finite scan granularity
  best *= 1.03;

  return best;
}


//generate function for 1D (unweighted)
// Accept/reject sampling: draw nevt values on [0,1] distributed according
// to pfunc, using `max` as the (constant) envelope height.  Consumes two
// uniform draws from the global generator `irand` per attempt.
valD GenerateMC_1D( Double_t (*pfunc)(Double_t), int nevt, double max)
{
  valD sample(nevt);

  int filled = 0;
  while (filled < nevt)
    {
      // candidate position and acceptance threshold
      const double x = irand.rand_double();
      const double u = irand.rand_double() * max;

      // (a commented-out Gaussian "stochastic fluctuation" of the
      //  candidate used to live here)

      // keep the candidate when it falls under the target density
      if (u < pfunc(x))
	{
	  sample[filled] = x;
	  ++filled;
	}
    }

  return sample;
}


// Accept/reject envelope heights for the two test densities,
// precomputed once at static-initialization time.
const double func_max_skew=find_max(Skew);
const double func_max_double=find_max(DoublePeak);

//generate a list for the PDF
// Build a kernel-density PDF from nevt accept/reject samples of func_ptr
// and tabulate it on a fixed grid over [0,1) with step 0.0005.
//
// Returns the tabulated PDF values.  Optionally also fills:
//   err - the PDF's internal bias estimate at each grid point
//   x   - the grid points themselves
//   var - bootstrap standard deviation of the bias-corrected PDF,
//         estimated from 100 resampled datasets
// verbose is forwarded to DataSet::ComputePDF.
//
// DataSet::b_bias_only is forced on for the duration of the call and
// restored on exit.
vector<double> Generate_PDF_results(int nevt, 
				    vector<double>* err=NULL,
				    vector<double>* x=NULL,
				    vector<double>* var=NULL,
				    Double_t (*func_ptr)(Double_t) =Skew,
				    bool verbose=false)
{
  bool temp_bias_only =  DataSet::b_bias_only;
  DataSet::b_bias_only = true;

  //first make a dataset covering [0,1] with the global bin width
  DataSet dataset(make_list<double>(0),
		  make_list<double>(1.0),
		  make_list<double>(bin_width));

  //pick the accept/reject envelope matching the requested density
  double func_max=func_max_skew;
  if(func_ptr == DoublePeak)
    {
      func_max=func_max_double;
    }

  valD MC= GenerateMC_1D(func_ptr, nevt, func_max);

  for(size_t i=0; i<MC.size(); ++i)
    dataset.Fill(make_list<double>(MC[i]));

  PDF PDF_func=dataset.ComputePDF(verbose);

  //tabulate the PDF (and its bias estimate) on the fixed grid
  vector<double> y;
  vector<double> x_temp;
  vector<double> err_temp;

  for(double i=0; i<1.0; i+=0.0005)
    {
      double temp_err=0;

      //only request the bias estimate when the caller wants it
      if(err)
	y.push_back(PDF_func(make_list<double>(i), &temp_err));
      else
	y.push_back(PDF_func(make_list<double>(i)));

      x_temp.push_back(i);
      err_temp.push_back(temp_err);

      if(x)
	x->push_back(i);

      if(err)
	err->push_back(temp_err);
    }

  //bootstrap variance computation
  if(var)
    {
      const int n_var=100;

      vector<double> mean(y.size(),0);
      vector<double> mean2(y.size(),0);

      //generate n_var resampled datasets and accumulate the spread of the
      //bias-corrected bootstrap values around the bias-corrected central
      //value (y - err)
      for(int k=0; k<n_var; ++k)
	{
	  DataSet data_smear;

	  if(normal_bootstrap)
	    data_smear=dataset.GenerateFixDataSet();
	  else
	    data_smear=dataset.GenerateDataSet();

	  PDF PDF_smear=data_smear.ComputePDF();

	  for(size_t j=0; j<x_temp.size(); ++j)
	    {
	      double temp_err;
	      double temp_value = PDF_smear(make_list<double>(x_temp[j]), 
					    &temp_err);
	      //bias-corrected bootstrap value
	      double corr_value=temp_value - temp_err;
	      mean[j]+=corr_value;
	      mean2[j]+= (corr_value-y[j]+err_temp[j])*
		(corr_value-y[j]+err_temp[j]);
	    }
	}

      //NOTE(review): an unused Bessel factor n/(n-1) was computed here but
      //never applied to mean2; the dead variable has been removed and the
      //uncorrected estimate kept to preserve the original results —
      //confirm whether the correction was intended.
      for(size_t j=0; j<mean.size(); ++j)
	{
	  mean[j]/=n_var;
	  mean2[j]/=n_var;

	  var->push_back(sqrt(mean2[j]));
	}
    }

  DataSet::b_bias_only = temp_bias_only;

  return y;
}


// Bundle of results for one cut value from Generate_pair_true.
struct mytable{
  // ensemble ("true") quantities averaged over many independent kernels
  double true_mean, mean, bias, var;
  
  //measured value
  // single-experiment quantities, estimated by bootstrap smearing of the
  // first training sample
  double measure_actual, measure_mean, measure_bias, measure_var;

  // pull distributions over the example experiments; allocated with `new`
  // inside Generate_pair_true — the caller owns (Writes/deletes) them
  TH1D* h_mean, *h_bias, *h_var, *h_sig, *h_sig_un;

};

//given a cut, get numbers corresponding
//nevt, bias and sqrt(variance)
//
// For a cut on x1+x2, where x1 and x2 are independent draws from the
// density func_ptr:
//  - true_mean : fraction of pairs passing the cut, from 10^6 true MC pairs
//  - mean/bias/var : ensemble mean, bias and (bias-corrected) spread of the
//    kernel-PDF estimate over nevt_kernel independent training samples of
//    size nevt
//  - measure_* : single-experiment estimates from the first training
//    sample, obtained by bootstrap smearing of its dataset
// Pull histograms (h_*) are filled for the first 1000 kernels; the caller
// owns them.  DataSet::b_bias_only is forced on and restored on exit.
mytable Generate_pair_true(double cut,
			   int nevt=200,
			   Double_t (*func_ptr)(Double_t)=Skew)			  
{
  bool temp_bias_only =  DataSet::b_bias_only;
  DataSet::b_bias_only = true;

  //get the true fraction of events passing the cut from high-stat MC
  double true_frac=0;
  int nevt_mean=1000000;

  //pick the accept/reject envelope matching the requested density
  double func_max=func_max_skew;
  if(func_ptr == DoublePeak)
    {
      func_max=func_max_double;
    }
  valD MC1= GenerateMC_1D(func_ptr, nevt_mean, func_max);
  valD MC2= GenerateMC_1D(func_ptr, nevt_mean, func_max);
  int temp_count=0;
  for(unsigned int n=0; n<MC1.size(); ++n)
    {
      if(MC1[n]+MC2[n] > cut)
	temp_count++;
    }
  true_frac+=temp_count;  
  true_frac/=nevt_mean;

  //now the same assuming we only have nevt training events

  //repeat experiment nevt_kernel times
  //(dead accumulators avg_bias/avg_mean2/measure_mean2 were removed)
  int nevt_kernel=5000;
  double avg_mean=0;
  double avg_mean_corr=0;
  double avg_mean2_corr=0;

  vector<double> measure_actual;
  vector<double> measure_actual_corr;
  vector<double> measure_mean;
  vector<double> measure_bias;
  vector<double> measure_var;
  vector<double> measure_var_corr;

  //number of smeared pdfs per experiment for the measured quantities
  int nevt_smear=100;
  mytable result;

  for(int i=0; i<nevt_kernel; ++i)
    {
      //build the kernel first
      valD MC= GenerateMC_1D(func_ptr, nevt, func_max);

      //make a dataset
      DataSet dataset(make_list<double>(0),
		      make_list<double>(1.0),
		      make_list<double>(bin_width));

      for(size_t j=0; j<MC.size(); ++j)
	dataset.Fill(make_list<double>(MC[j]));

      //get the pdf
      PDF PDF_func=dataset.ComputePDF(false);

      //now generate events from the kernel estimate
      PDF::MC_Generator gen=
	PDF_func.Generator(make_list<PDF::MC_Flag>
			   (PDF::OUTPUT));

      vector<PDF::MC_Event> evt1=
	gen.GenerateFast(make_list<double>(), 1000);

      vector<PDF::MC_Event> evt2=
	gen.GenerateFast(make_list<double>(), 1000);

      //weighted passing fraction, with plain and bias-corrected weights
      double t_frac=0;
      double t_frac_total=0;

      double t_frac_corr=0;
      double t_frac_total_corr=0;

      for(unsigned int j=0; j<evt1.size(); ++j)
	{
	  double temp_prod=evt1[j].weight*evt2[j].weight;
	  double temp_prod_corr=(evt1[j].weight - evt1[j].err)
	    *(evt2[j].weight - evt2[j].err);

	  if(evt1[j].value[0] + evt2[j].value[0] > cut)
	    {
	      t_frac+=temp_prod;
	      t_frac_corr+=temp_prod_corr;
	    }

	  t_frac_total+=temp_prod;
	  t_frac_total_corr+=temp_prod_corr;
	}
      t_frac/=t_frac_total;
      t_frac_corr/=t_frac_total_corr;

      avg_mean+=t_frac;

      avg_mean_corr+=t_frac_corr;
      avg_mean2_corr+=t_frac_corr*t_frac_corr;

      //take the first 1000 kernels and do an example bootstrap study
      if(i<1000)
	{
	  if(i%5==0)
	    cout<<"toy integration progress: "<<i<<endl;

	  //make histograms once
	  if(i==0)
	    {
	      //The name suffix distinguishes the cut values.  Note the
	      //parentheses: (int)(cut*10).  The previous ((int)cut)*10
	      //collapsed different cuts onto identical histogram names
	      //(e.g. 0.2 and 0.6 both gave "..._0").
	      int suffix=(int)(cut*10);
	      double width_mean=10;
	      result.h_mean = new TH1D(TString("measure_mean_")+suffix,
				       "mean",
				       12000, 
				       -width_mean,
				       width_mean
				       );

	      result.h_bias = new TH1D(TString("measure_bias_")+suffix,
				       "bias",
				       12000, 
				       -width_mean,
				       width_mean);

	      result.h_var = new TH1D(TString("measure_var_")+suffix, "var",
				      12000, 
				      -width_mean,
				      width_mean);

	      result.h_sig = new TH1D(TString("measure_sig_")+suffix, "sig",
				      12000, 
				      -width_mean,
				      width_mean);

	      result.h_sig_un = new TH1D(TString("measure_sig_un")+suffix, "sig_un",
					 12000, 
					 -width_mean,
					 width_mean);
	    }

	  //per-experiment bootstrap accumulators
	  //(an unused (star-actual)^2 accumulator was removed)
	  double t_measure_mean=0;
	  double t_measure_mean2=0;
	  double t_measure_mean_corr=0;
	  double t_measure_mean2_corr_test=0;
	  double t_measure_bias=0;

	  //record the single-experiment ("actual") values
	  measure_actual.push_back(t_frac);
	  measure_actual_corr.push_back(t_frac_corr);

	  //now do the bootstrap smearing
	  for(int s=0; s<nevt_smear; ++s)
	    {
	      DataSet data_smear;

	      if(normal_bootstrap)
		data_smear=dataset.GenerateFixDataSet();
	      else
		data_smear=dataset.GenerateDataSet();

	      PDF PDF_smear=data_smear.ComputePDF();

	      PDF::MC_Generator gen_smear=
		PDF_smear.Generator(make_list<PDF::MC_Flag>
				    (PDF::OUTPUT));

	      vector<PDF::MC_Event> evt1_s=
		gen_smear.GenerateFast(make_list<double>(), 1000);

	      vector<PDF::MC_Event> evt2_s=
		gen_smear.GenerateFast(make_list<double>(), 1000);

	      double temp_frac=0;
	      double temp_frac_tot=0;
	      double temp_frac_star=0;
	      double temp_frac_tot_star=0;

	      for(unsigned int j=0; j<evt1_s.size(); ++j)
		{
		  double temp_prod=evt1_s[j].weight*evt2_s[j].weight;
		  double temp_prod_star=(evt1_s[j].weight-evt1_s[j].err)*
		    (evt2_s[j].weight-evt2_s[j].err);

		  if(evt1_s[j].value[0] + evt2_s[j].value[0] > cut)
		    {
		      temp_frac+=temp_prod;
		      temp_frac_star+=temp_prod_star;
		    }

		  temp_frac_tot+=temp_prod;
		  temp_frac_tot_star+=temp_prod_star;
		}

	      temp_frac/=temp_frac_tot;
	      temp_frac_star/=temp_frac_tot_star;

	      t_measure_mean+=temp_frac;
	      //spread of the smeared estimates around the actual value
	      t_measure_mean2+=(temp_frac-measure_actual[i])*
		(temp_frac-measure_actual[i]);

	      t_measure_mean_corr+=temp_frac_star;
	      t_measure_mean2_corr_test+=(temp_frac_star)*(temp_frac_star);

	      t_measure_bias+=temp_frac_star;
	    }

	  t_measure_mean/=nevt_smear;
	  t_measure_mean_corr/=nevt_smear;
	  t_measure_mean2/=nevt_smear;
	  t_measure_mean2_corr_test/=nevt_smear;
	  t_measure_bias/=nevt_smear;	  

	  measure_mean.push_back(t_measure_mean);
	  //bias estimate: uncorrected mean minus bias-corrected mean
	  measure_bias.push_back(t_measure_mean-t_measure_bias);
	  measure_var.push_back(sqrt(t_measure_mean2));
	  //bias-corrected sample variance with Bessel correction
	  measure_var_corr.push_back(
  sqrt((t_measure_mean2_corr_test - 
	t_measure_mean_corr*t_measure_mean_corr)*nevt_smear/(nevt_smear-1))
				     );
	}

    }

  avg_mean/=nevt_kernel;
  avg_mean_corr/=nevt_kernel;
  avg_mean2_corr/=nevt_kernel;

  result.true_mean=true_frac;
  result.mean=avg_mean;
  result.bias=avg_mean - true_frac;
  result.var=sqrt((avg_mean2_corr-avg_mean_corr*avg_mean_corr)*(nevt_kernel)/(nevt_kernel-1.));

  //single-experiment summary taken from the first kernel
  result.measure_actual=measure_actual[0];
  result.measure_mean=measure_mean[0];
  result.measure_bias=measure_bias[0];
  result.measure_var=measure_var[0];

  //fill the pull plots
  for(size_t i=0; i<measure_mean.size(); ++i)
    {
      result.h_mean->Fill((measure_actual[i]-result.true_mean)/result.true_mean);
      //NOTE(review): result.bias can be near zero, making this pull
      //unbounded — confirm the intended normalization
      result.h_bias->Fill((measure_bias[i]-result.bias)/result.bias);
      result.h_var->Fill((measure_var_corr[i]-result.var)/result.var);
      result.h_sig->Fill((measure_actual_corr[i]-result.true_mean)/
			 measure_var_corr[i]);

      result.h_sig_un->Fill((measure_actual[i]-result.true_mean)/
			    measure_var[i]);
    }

  DataSet::b_bias_only = temp_bias_only;

  return result;
}



//Generate TGraph
//a function to facilitate making TGraph for ROOT
// Dump a one-dimensional PDF into a TGraphErrors: x = bin position,
// y = PDF value, y-error = the PDF's internal error estimate.
// Throws std::runtime_error if the PDF is not 1D.
TGraphErrors Generate_TGraph(PDF& pdf, string name="example")
{
  //only a 1D template can be drawn as a TGraph
  if(pdf.Rank() != 1)
    {
      cout<<"ERROR: Generate_TGraph only works for 1D template"
	  <<endl;
      //a bare `throw;` outside a catch handler calls std::terminate;
      //throw a real exception so callers can recover
      throw runtime_error("Generate_TGraph: PDF rank must be 1");
    }

  //create a MC_Generator 
  //this is simply to dump all the values of the PDF
  PDF::MC_Generator gen=
    pdf.Generator(make_list<PDF::MC_Flag>
		  (PDF::OUTPUT));
  
  
  //GenerateFull does NOT use random numbers,
  //it simply dumps all the values of the pdf in each bin
  vector<PDF::MC_Event> evt=
    gen.GenerateFull(make_list<double>());
    
  //dump the values and store them in a TGraph
  vector<double> x,y,err;
  for(unsigned int i=0; i<evt.size(); ++i)
    {
      x.push_back(evt[i].value[0]);
      y.push_back(evt[i].weight);
      err.push_back(evt[i].err);
    }

  
  TGraphErrors result(x.size(), &x[0], &y[0], NULL, &err[0]);

  //give the plot a name
  result.SetName(name.c_str());
  
  return result;
}


// Bootstrap/kernel-density toy study driver.
// Arguments: output.root [extra smoothing] [double_peak] [bin width]
//            [normal_bootstrap]
// Writes the true density, toy-MC averages, bias/variance graphs and the
// per-cut pull histograms into the output ROOT file.
int main(int argc, char **argv)
{
  //test random_poisson
  /*
  TH1D testtest("testtest", "testtest", 200, -500, 600);

  for(int i=0; i<100000; ++i)
    testtest.Fill(DataSet::Random_Poisson(101));

  TFile filefileout("testtest.root", "RECREATE");
  testtest.Write();
  filefileout.Close();

  return 1;
  */

  //need at least the output file name
  if(argc < 1+1)
    {
      //usage now reflects the actual argument order parsed below
      cout<<"Usage: ./bootstrap_example output.root [extra smoothing] [double_peak] [bin width] [normal_bootstrap]"
	  <<endl;

      //exit with a failure code; the previous bare `throw;` with no
      //active exception would call std::terminate
      return 1;
    }

  //set random seed (can be custom)
  //only used for smearing templates
  DataSet::SetRandomSeed(time(0));

  //set extra smoothing factor
  //i.e. scaling up effective number of statistics
  //100 means less smoothing
  //i.e. the kernel width would be as if we have 100x more data
  //0.01 means more smoothing
  //i.e. the kernel width would be as if we have 0.01x data
  if(argc >= 2+1)
    {
      //we have extra smoothing
      cout<<"Extra smoothing: "<<argv[2]<<endl;

      DataSet::kernel_smoothing_factor=atof(argv[2]);
    }

  //default use the Skew function
  Double_t (*func_ptr)(Double_t)=Skew;

  //3rd argument containing "ouble" (Double/double) selects DoublePeak
  if(argc >= 3+1 && string(argv[3]).find("ouble")!=string::npos)
    func_ptr=DoublePeak;

  //4th argument overrides the dataset bin width
  if(argc >= 4+1)
    {
      cout<<"Altering bin width: "<<atof(argv[4])<<endl;
      bin_width=atof(argv[4]);
    }

  //5th argument containing "normal" selects the plain bootstrap.
  //(This previously tested argv[3] — the double_peak flag — which made
  //the option impossible to enable; the flag is the 5th argument.)
  if(argc >= 5+1 && string(argv[5]).find("normal")!=string::npos)
    {
      cout<<"Normal bootstrap is used"<<endl;
      normal_bootstrap=true;
    }

  //turn on bias only in the error computation
  //otherwise the error includes bias^2 + variance for the templates
  /*
    DataSet::b_bias_only = true;
  */

  //output results
  TFile fileout(argv[1], "RECREATE");

  //get the real function (fills the grid x_real and bias estimates)
  vector<double> x_real;
  vector<double> yerr_real;

  cout<<"computing real function"<<endl;
  vector<double> y_real= Generate_PDF_results(10000, &yerr_real, &x_real,
					      NULL, func_ptr);

  //just compute the real function instead (exact evaluation on the grid)
  vector<double> new_x_real;
  vector<double> new_y_real;

  for(double i=0; i<1.0; i+=0.0005)
    {
      new_x_real.push_back(i);
      new_y_real.push_back(func_ptr(i));
    }

  TGraphErrors graph_real
    (new_x_real.size(), &new_x_real[0], &new_y_real[0], NULL, NULL);
  graph_real.SetName("real");
  graph_real.Write();

  //now do a full toy experiment
  int nexp=1000;

  vector<double> bias_real(x_real.size(),0);  
  vector<double> y_mean(x_real.size(),0);
  vector<double> y_mean_corr(x_real.size(),0);
  vector<double> y2_mean(x_real.size(),0);
  vector<double> y2_mean_corr(x_real.size(),0);
  vector<double> bias_mean(x_real.size(),0);
  vector<double> var_mean(x_real.size(),0);
  vector<double> var_real(x_real.size(),0);
  vector<double> err_real(x_real.size(),0);

  vector<double> y_example(x_real.size(),0);  
  vector<double> y_example_corr(x_real.size(),0);  
  vector<double> bias_example(x_real.size(),0);  
  vector<double> var_example(x_real.size(),0);  

  vector<double> err_temp;
  vector<double> var_temp;

  DataSet::b_bias_only = true;

  //compute the real variance and bias over nexp toy experiments
  for(int i=0; i<nexp; ++i)
    {
      if(i%50==0)
	cout<<"Progress: "<<i<<endl;

      err_temp.clear();
      var_temp.clear();

      bool verbose=false;

      if(i==0)
	verbose=true;

      vector<double> y_temp=Generate_PDF_results
	(500, &err_temp, NULL, &var_temp, func_ptr, verbose);

      //get the rescale factor for the corrected version
      double rescale= summation(y_temp)/summation(y_temp-err_temp);

      //keep one example experiment
      if(i==0)
	{
	  y_example=y_temp;
	  bias_example=err_temp;
	  var_example=var_temp;
	  y_example_corr= (y_temp-err_temp);
	  y_example_corr*= rescale;
	}

      for(size_t j=0; j<y_temp.size(); ++j)
	{
	  y_mean[j]+=y_temp[j];
	  y_mean_corr[j]+=(y_temp[j]-err_temp[j])*rescale;
	  y2_mean[j]+=y_temp[j]*y_temp[j];
	  y2_mean_corr[j]+=(y_temp[j]-err_temp[j])*rescale
	    *(y_temp[j]-err_temp[j])*rescale;
	  bias_mean[j]+=err_temp[j];
	  var_mean[j]+=var_temp[j];
	}      
    }

  for(size_t j=0; j<y_mean.size(); ++j)
    {
      y_mean[j]/=nexp;
      y_mean_corr[j]/=nexp;
      y2_mean[j]/=nexp;
      y2_mean_corr[j]/=nexp;
      bias_mean[j]/=nexp;
      var_mean[j]/=nexp;

      bias_real[j]=y_mean[j]-new_y_real[j];
      //var_real[j]=sqrt(fabs(y2_mean[j]-y_mean[j]*y_mean[j])*(nexp)/(nexp-1.));
      var_real[j]=sqrt(fabs(y2_mean_corr[j]-y_mean_corr[j]*y_mean_corr[j])
		       *(nexp)/(nexp-1));
      err_real[j]=sqrt(var_real[j]*var_real[j] + bias_real[j]*bias_real[j]);
    }

  TGraphErrors graph_toy(x_real.size(), &x_real[0], &y_mean_corr[0], 
			 NULL, &err_real[0]);
  graph_toy.SetName("graph_toyMC");
  graph_toy.Write();

  TGraphErrors graph_example(x_real.size(), &x_real[0], &y_example_corr[0], 
			     NULL, &var_example[0]);
  graph_example.SetName("graph_example");
  graph_example.Write();

  TGraphErrors real_bias(x_real.size(), &x_real[0], &bias_real[0], 
			 NULL, NULL);

  real_bias.SetName("real_bias");
  real_bias.Write();

  TGraphErrors example_bias(x_real.size(), &x_real[0], &bias_example[0], 
			 NULL, NULL);

  example_bias.SetName("example_bias");
  example_bias.Write();

  TGraphErrors cal_bias(x_real.size(), &x_real[0], &bias_mean[0], 
			 NULL, NULL);

  cal_bias.SetName("cal_bias");
  cal_bias.Write();

  TGraphErrors real_var(x_real.size(), &x_real[0], &var_real[0], 
			NULL, NULL);

  real_var.SetName("real_var");
  real_var.Write();

  TGraphErrors example_var(x_real.size(), &x_real[0], &var_example[0], 
			NULL, NULL);

  example_var.SetName("example_var");
  example_var.Write();

  TGraphErrors cal_var(x_real.size(), &x_real[0], &var_mean[0], 
			NULL, NULL);

  cal_var.SetName("cal_var");
  cal_var.Write();

  //now do the "convolution"

  //some testing
  //make smoothing factor 100 times
  //DataSet::kernel_smoothing_factor=1000000;

  //scan the cut on x1+x2 and dump the summary numbers and pull plots
  for(double cut = 0.2; cut<2.0; cut+=0.4)
    {
      mytable result=Generate_pair_true(cut, 500, func_ptr);
      cout<<"cut: "<<cut<<endl;
      cout<<"true mean: "<<result.true_mean<<endl;
      cout<<"mean: "<<result.mean<<endl;
      cout<<"bias: "<<result.bias<<endl;
      cout<<"var: "<<result.var<<endl;
      cout<<"measure actual: "<<result.measure_actual<<endl;
      cout<<"measure mean: "<<result.measure_mean<<endl;
      cout<<"measure bias: "<<result.measure_bias<<endl;
      cout<<"measure var: "<<result.measure_var<<endl;
      cout<<endl;

      result.h_mean->Write();
      result.h_bias->Write();
      result.h_var->Write();
      result.h_sig->Write();
      result.h_sig_un->Write();
    }
  //close the root file
  fileout.Close();

  cout<<"Program ends."<<endl;
  //signal success to the shell (was `return 1`, which reads as failure)
  return 0;
}
  
