/* This file implements the Topics over Time (ToT) model proposed by */
/* Xuerui Wang and Andrew McCallum. */
#ifndef TOT_CC
#define TOT_CC
#include<string>
#include<fstream>
#include<sstream>
#include<iostream>
#include<algorithm>
#include<iterator>
#include<vector>
#include<map>
#include<cstdlib>
#include<ctime>
#include<cmath>
#include "def.hpp"
#include "utils.hpp"
#include "topicmodel_lda_abstract.hpp"

/* Shape parameters of one topic's Beta distribution over normalized time,
   fit by moment matching in ToT::UpdateBetaDistribution(). */
typedef struct TimeBetaStruct{
  double t1;   // first Beta shape parameter
  double t2;   // second Beta shape parameter
}TimeBeta;

typedef vector<TimeBeta> TopicsTime;          // one Beta(t1, t2) per topic
typedef vector<double> TimeBuffer;            // timestamps collected for one topic
typedef vector<TimeBuffer> TopicsTimeBuffer;  // per-topic timestamp sufficient statistics

class ToT:public LatentDirichletAllocation{
private:
  int** n_m_k; // n_m_k[m][k]: number of tokens in document m assigned to topic k
  int* n_m;    // n_m[m]: total number of tokens in document m
  int** n_t_k; // n_t_k[k][t]: number of times term t is assigned to topic k
  int* n_k;    // n_k[k]: total number of tokens assigned to topic k
  int BURN_IN;    // Gibbs iterations discarded before any model snapshot
  int SAMPLE_LAG; // snapshot interval (in iterations) after burn-in
  int  SAVE_TIME; // how many times the model has been written out
  int OPT_INTERVAL; // the interval of gibbs iterations for hyper-parameters optimization                                                                   
  int  LIKELIHOOD; // log the training likelihood every LIKELIHOOD iterations
  TopicsTime psi;          // per-topic Beta(t1, t2) distributions over time
  TopicsTimeBuffer psi_ss; // per-topic timestamp samples (sufficient statistics)

  double alpha_sum; // sum of the alpha hyper-parameters (inherited from base)
  double beta_sum;  // sum of the beta hyper-parameters (inherited from base)
  double time_likelihood; // timestamp log-likelihood accumulated by the last E step
  int switch_label; // number of tokens whose topic assignment changed in the last sweep
  void Single_E_Step(bool); // a single swipe of data using Monte Carlo E step
  void Single_M_Step();
  void Initial_Sample(); // random initial topic assignment for every token
  double  compute_training_likelihood();
  void TopicOut(); // write theta/phi point estimates
  void ModelOut(); // write raw count tables and Beta parameters
  void ReadTemporalDocuments();
  void UpdateBetaDistribution(); // update per-topic temporal distribution through simple moment matching
  void CleanPsiSS(); // reset the per-topic timestamp buffers
  void Normalize_Time(); // rescale all document timestamps into (0,1)
public:
  // NOTE(review): n_m, n_k, n_m_k, n_t_k are allocated in the constructor but
  // there is no destructor, so they are never freed -- acceptable only if a
  // single ToT instance lives for the whole process; confirm intended usage.
  virtual void Inference();
  virtual void Inference(bool);
  virtual void Prediction();
  virtual double Likelihood();
  virtual int ReadDOCS();
  virtual int GetDocuments(int);
  ToT();
};

void ToT::UpdateBetaDistribution(){
  // Refit each topic's Beta(t1, t2) time distribution by moment matching
  // (Wang & McCallum, "Topics over Time", KDD 2006):
  //   common = m*(1-m)/v - 1;   t1 = m * common;   t2 = (1-m) * common
  // where m and v are the sample mean and (biased) variance of the timestamps
  // currently assigned to the topic.
  for(int k=0; k < PuzaDEF::Instance()->TOPIC_NUM;k++){
    // Guard: an empty sample set makes the mean 0/0 (NaN); keep the
    // previous psi[k] rather than corrupting the sampler.
    if(psi_ss[k].empty()) continue;
    const double n = (double)psi_ss[k].size();
    double sample_mean = 0.0;
    for(vector<double>::iterator iter = psi_ss[k].begin(); iter != psi_ss[k].end(); iter++){
      sample_mean += (*iter);
    }
    sample_mean = sample_mean / n;
    double sample_variance = 0.0;
    for(vector<double>::iterator iter = psi_ss[k].begin(); iter != psi_ss[k].end(); iter++){
      double diff = (*iter) - sample_mean;
      sample_variance += diff * diff;
    }
    sample_variance = sample_variance / n;
    // Guard: zero variance (all timestamps identical) sends both shape
    // parameters to infinity; keep the previous estimate instead.
    if(sample_variance <= 0.0) continue;
    double common = (sample_mean * (1 - sample_mean)) / sample_variance - 1;
    psi[k].t1 = sample_mean * common;
    psi[k].t2 = (1 - sample_mean) * common;
  }
}

void ToT::CleanPsiSS(){
  // Drop all per-topic timestamp samples gathered during the previous sweep.
  const int topic_count = PuzaDEF::Instance()->TOPIC_NUM;
  for(int topic = 0; topic < topic_count; topic++){
    psi_ss[topic].clear();
  }
}

void ToT::Normalize_Time(){
  // Map every raw timestamp t into (0,1) via t -> (t + 1) / (max_t + 2),
  // so each document's time lies strictly inside the Beta support.
  const int doc_count = PuzaDEF::Instance()->DOC_NUM;
  double max_stamp = 0;
  for(int d = 0; d < doc_count; d++){
    const double stamp = PuzaDEF::Instance()->data_timestamps[d];
    if(stamp > max_stamp){
      max_stamp = stamp;
    }
  }
  const double denominator = max_stamp + 2;
  for(int d = 0; d < doc_count; d++){
    PuzaDEF::Instance()->data_timestamps[d] =
      (PuzaDEF::Instance()->data_timestamps[d] + 1) / denominator;
  }
}

ToT::ToT(){
  // Allocate the count tables, size the per-topic time structures, and set
  // the sampler's schedule constants.
  n_m = new int[PuzaDEF::Instance()->DOC_NUM];
  n_k = new int[PuzaDEF::Instance()->TOPIC_NUM];
  // NOTE(review): n_m_k / n_t_k are not zeroed here -- assumes
  // allocate_memory zero-initializes the 2-D tables; confirm in utils.hpp.
  allocate_memory<int>(n_m_k, PuzaDEF::Instance()->DOC_NUM,PuzaDEF::Instance()->TOPIC_NUM);
  allocate_memory<int>(n_t_k,PuzaDEF::Instance()->TOPIC_NUM,PuzaDEF::Instance()->TERM_NUM);
  // alpha/beta come from the LatentDirichletAllocation base class; they must
  // already be initialized when this constructor runs.
  alpha_sum = get_sum(alpha,PuzaDEF::Instance()->TOPIC_NUM);
  beta_sum = get_sum(beta,PuzaDEF::Instance()->TERM_NUM);
  psi_ss.clear();
  psi.clear();

  // initialization: zero all totals and create one Beta(t1, t2) plus one
  // timestamp buffer per topic
  for (int i=0; i < PuzaDEF::Instance()->DOC_NUM; i++) { n_m[i] = 0; }
  for (int i=0; i < PuzaDEF::Instance()->TOPIC_NUM; i++) { 
    n_k[i] = 0;
    TimeBeta new_topic_beta;
    new_topic_beta.t1 = 0.0;
    new_topic_beta.t2 = 0.0;
    psi.push_back(new_topic_beta);
    TimeBuffer new_buffer;
    psi_ss.push_back(new_buffer);
  }


  LEARNING_MODE = 0; // batch mode;  
  READ_MODE = 0; // this is already set in the base class
  BURN_IN = 100;   // iterations before any model snapshot is taken
  SAMPLE_LAG = 25; // snapshot interval after burn-in
  SAVE_TIME = 0;   // no snapshots written yet
  OPT_INTERVAL = 25; // the interval of gibbs iterations for hyper-parameters optimization
  LIKELIHOOD = 20; // likelihood-logging interval
  PuzaLogger::Instance()->PutString("Gibbs Sampling for ToT initialized.");
}

void ToT::Initial_Sample(){
  CleanPsiSS();
  // assign initial labels
  int N=0;
  for (int i=0; i< PuzaDEF::Instance()->DOC_NUM; i++){
    TermList current_list = TM_DOCS[i];
    int term_count = current_list.size();
    double time_stamp = PuzaDEF::Instance()->data_timestamps[i];
    for (int j=0;j<term_count;j++){
      int term_id = current_list[j].term_id;
      //      int current_t = (int)(gsl_ran_flat(gBaseRand,0,1) * PuzaDEF::Instance()->TOPIC_NUM);
      int current_t = (int)random_uniform_distribution(0, PuzaDEF::Instance()->TOPIC_NUM);
      TM_DOCS[i][j].term_stat = current_t;
      n_m_k[i][current_t]++;
      n_t_k[current_t][term_id]++;
      n_k[current_t]++;
      n_m[i] ++;
      // update temporal statistics
      psi_ss[current_t].push_back(time_stamp);
      N ++ ;
    }
  }
}

int ToT::ReadDOCS(){
  // Stub: always reports success (0). Document loading presumably happens
  // elsewhere (e.g. ReadTemporalDocuments or the base class) -- TODO confirm.
  return 0;
}

int ToT::GetDocuments(int choice){
  // batch mode: `choice` is unused -- ToT always delegates to ReadDOCS(),
  // which is currently a stub returning 0.
  return ReadDOCS();
}


void ToT::Single_E_Step(bool like){
  double* p = new double[PuzaDEF::Instance()->TOPIC_NUM];
  int new_topic = 0;
  double u = 0.0;
  switch_label = 0;
  time_likelihood = 0.0;
  for (int i=0;i<PuzaDEF::Instance()->DOC_NUM;i++){
    TermList current_list = TM_DOCS[i];
    int term_count = current_list.size();
    double temp_time = PuzaDEF::Instance()->data_timestamps[i];
    //    cout << i << endl;
    for (int j=0;j < term_count; j++){
      int term_id = current_list[j].term_id;
      int current_assignment = current_list[j].term_stat;
      // remove current                                                                                                                                     
      n_m_k[i][current_assignment]--;
      n_t_k[current_assignment][term_id] --;
      n_k[current_assignment]--;

      // sampling from multinomial
      
      for (int k=0; k< PuzaDEF::Instance()->TOPIC_NUM; k++){
        p[k] = ((n_t_k[k][term_id] + beta[term_id])/ (n_k[k] + beta_sum))  \
	  * ((n_m_k[i][k] + alpha[k])/(term_count - 1 + alpha_sum)) \
	  * beta_distribution(psi[k].t1, psi[k].t2, 1-temp_time);
	//	cout << p[k] << " ";
      }
      // normalize                                                                                                                                         
      for (int k=1; k< PuzaDEF::Instance()->TOPIC_NUM; k++){
        p[k] += p[k-1];
      }
      // sampling from uniform
      u = gsl_rng_uniform(PuzaDEF::Instance()->gBaseRand) * p[PuzaDEF::Instance()->TOPIC_NUM-1];

      for(new_topic = 0; new_topic < PuzaDEF::Instance()->TOPIC_NUM; new_topic++){
        if (u < p[new_topic])
          break;
      }

      if(new_topic >= PuzaDEF::Instance()->TOPIC_NUM){
	for(int k=0; k < PuzaDEF::Instance()->TOPIC_NUM; k++){
	  //	  cout << p[k] << ":" << beta_distribution(psi[k].t1,psi[k].t2,temp_time)<< " " << endl;;
	  cout << "n_t_k :" << n_t_k[k][term_id] << endl;
	  cout << "n_k :" << n_k[k] << endl;
	  cout << "n_m_k :" << n_m_k[i][k] << endl;
	  cout << ((n_t_k[k][term_id] + beta[term_id])/ (n_k[k] + beta_sum)) * ((n_m_k[i][k] + alpha[k])/(term_count - 1 + alpha_sum)) << endl;
	}
	cout << "TERM COUNT " << term_count << endl;
	cout << "alpha sum " << alpha_sum << endl;
	cout << endl;
	cout << "HELLO" << endl;
	exit(0);
      }
      n_m_k[i][new_topic]++;
      n_t_k[new_topic][term_id]++;
      n_k[new_topic]++;
      psi_ss[new_topic].push_back(temp_time);
      TM_DOCS[i][j].term_stat = new_topic;
      if(like == true){
	time_likelihood = time_likelihood + log(beta_distribution(psi[new_topic].t1,psi[new_topic].t2,1-temp_time));
      }
      if(new_topic != current_assignment)
        switch_label ++;
    }
  }
  delete[] p;
}

void ToT::Inference(){
  // Default entry point: run batch Gibbs sampling with model saving enabled.
  Inference(true);
}

void ToT::TopicOut(){
  ofstream outFile;
  string model_file = PuzaDEF::Instance()->output_file_name + ".theta";
  outFile.open(model_file.c_str());
  for (int i=0; i<PuzaDEF::Instance()->DOC_NUM; i++){
    outFile << PuzaDEF::Instance()->data_ids[i] << "\t";
    for(int k=0; k<PuzaDEF::Instance()->TOPIC_NUM; k++){
      double value = (n_m_k[i][k] + alpha[k]) / (n_m[i] + alpha_sum);
      outFile << k << ":" << value << " ";
    }
    outFile << endl;
  }
  outFile.flush();
  outFile.close();

  model_file = PuzaDEF::Instance()->output_file_name + ".phi";
  outFile.open(model_file.c_str());
  for (int i=0; i<PuzaDEF::Instance()->TERM_NUM; i++){
    outFile << i << "\t";
    for (int k=0; k<PuzaDEF::Instance()->TOPIC_NUM; k++){
      double value = (n_t_k[k][i] + beta[i]) / (n_k[k] + beta_sum);
      outFile << k << ":" << value << " ";
    }
    outFile << endl;
  }
  outFile.flush();
  outFile.close();
}

void ToT::ModelOut(){
  ofstream outFile;
  string model_file = PuzaDEF::Instance()->output_file_name + ".doc";
  outFile.open(model_file.c_str());
  for (int i=0; i<PuzaDEF::Instance()->DOC_NUM; i++){
    outFile << PuzaDEF::Instance()->data_ids[i] << "\t" << n_m[i] << "\t";
    for(int k=0; k<PuzaDEF::Instance()->TOPIC_NUM; k++){
      outFile << k << ":" << n_m_k[i][k] << " ";
    }
    outFile << endl;
  }
  outFile.flush();
  outFile.close();
  
  model_file = PuzaDEF::Instance()->output_file_name + ".word";
  outFile.open(model_file.c_str());
  for (int i=0; i<PuzaDEF::Instance()->TERM_NUM; i++){
    outFile << i << "\t";
    for (int k=0; k<PuzaDEF::Instance()->TOPIC_NUM; k++){
      outFile << k << ":" << n_t_k[k][i] << " ";
    }
    outFile << endl;
  }
  outFile.flush();
  outFile.close();

  model_file = PuzaDEF::Instance()->output_file_name + ".topic";
  outFile.open(model_file.c_str());
  for (int k=0; k < PuzaDEF::Instance()->TOPIC_NUM; k++){
    outFile << k << "\t" << n_k[k] << "\t" << psi[k].t1 << " " << psi[k].t2 << endl;
  }
  outFile.flush();
  outFile.close();
}

double ToT::compute_training_likelihood(){
  // Collapsed joint log-likelihood of the training data given the current
  // topic assignments: the Dirichlet-multinomial terms
  //   sum_k [ lgamma(beta_sum) + sum_t (lgamma(n_t_k + beta_t) - lgamma(beta_t))
  //           - lgamma(n_k + beta_sum) ]
  // for the topic-word counts, the analogous document-topic terms, plus the
  // timestamp log-likelihood accumulated by the last Single_E_Step(true).
  double result = 0.0;
  // topic-word part: constant lgamma(beta_sum) once per topic
  result += PuzaDEF::Instance()->TOPIC_NUM * log_gamma(beta_sum);
  for (int k = 0; k < PuzaDEF::Instance()->TOPIC_NUM; k++){
    for (int j = 0; j < PuzaDEF::Instance()->TERM_NUM; j++){
      result += log_gamma(n_t_k[k][j]+beta[j]);
      result -= log_gamma(beta[j]);
    }
    result -= log_gamma(n_k[k]+beta_sum);
  }

  // document-topic part: constant lgamma(alpha_sum) once per document
  result += PuzaDEF::Instance()->DOC_NUM * log_gamma(alpha_sum);
  for (int k = 0; k < PuzaDEF::Instance()->DOC_NUM; k++){
    for (int j = 0; j < PuzaDEF::Instance()->TOPIC_NUM; j++){
      result += log_gamma(n_m_k[k][j]+alpha[j]);
      result -= log_gamma(alpha[j]);
    }
    result -= log_gamma(n_m[k]+alpha_sum);
  }
  // time term is stale (zero) unless the last E step ran with like == true
  result += time_likelihood;
  return result;  
}

void ToT::Inference(bool saved){
  // Batch Gibbs sampling for ToT: normalize timestamps, randomly initialize
  // assignments, then alternate E steps (token resampling) with Beta
  // moment-matching updates for MAX_ITER iterations.
  // `saved` gates the post-burn-in model snapshots (TopicOut/ModelOut);
  // the original ignored this parameter and always wrote them.
  PuzaLogger::Instance()->PutString("Start Batch Gibbs Sampling for ToT");
  const int MAX_ITER = 500; // previously a magic number inside the loop
  // the first iter
  Normalize_Time();
  Initial_Sample();
  UpdateBetaDistribution();

  int ITER = 0;
  while(1){
    CleanPsiSS();
    if((ITER > BURN_IN) && (ITER % SAMPLE_LAG == 0)){
      // post-burn-in sampling iteration: also track the time likelihood
      Single_E_Step(true);
      UpdateBetaDistribution();
      if(saved){
        TopicOut();
        ModelOut();
        SAVE_TIME++;
      }
    }
    else{
      Single_E_Step(false);
      UpdateBetaDistribution();
    }

    if (ITER>=MAX_ITER){
      break;
    }

    if (ITER % LIKELIHOOD == 0){
      // every LIKELIHOOD (20) iterations, output the training set likelihood
      double training_likelihood = compute_training_likelihood();
      PuzaLogger::Instance()->PutString(" Likelihood:" + to_string<double>(training_likelihood,std::dec));
    }
    PuzaLogger::Instance()->PutString("Iteration "+to_string<int>(ITER,std::dec)+ " Label Switch:" + to_string<int>(switch_label,std::dec));
    ITER ++;
  }
  PuzaLogger::Instance()->PutString("Batch Gibbs Sampling  Finished.");
}

void ToT::Prediction(){
  // Stub -- prediction on held-out documents is not implemented.
}

double ToT::Likelihood(){
  // Stub -- held-out likelihood is not implemented; always returns 0.0.
  // (Training likelihood lives in compute_training_likelihood().)
  return 0.0;
}


#endif
