/*
 * TopicLearningGibbs.cpp
 *
 *  Created on: Apr 4, 2011
 *      Author: Clint
 */

#include "TopicLearningGibbs.h"



/**
 * Constructor for the regular topic-learning method: both Dirichlet
 * hyperparameters are fixed and symmetric (alpha for the document-topic
 * distributions, eta for the topic-word distributions).
 *
 * After storing the configuration it builds the initial sampler state:
 * random topic assignments (z), per-document topic counts (theta counts),
 * and per-topic word counts (beta counts).
 */
TopicLearningGibbs::TopicLearningGibbs(size_t num_topics,
		size_t max_iterations,
		size_t burn_in_period,
		double alpha,
		double eta,
		string data_file,
		string vocab_file,
		size_t topic_upper_bound) : LDAPPMBase(max_iterations,
		data_file,
		vocab_file) {

	// Note: keep this initialization order — later steps (e.g. alpha_vec_
	// and the init_* calls) depend on members set earlier.
	alpha_ = alpha;
	eta_ = eta;
	num_topics_ = num_topics;
	burn_in_period_ = burn_in_period;
	num_learned_topics_ = num_topics;
	alpha_vec_ = ones<vec>(num_topics_) * alpha; // symmetric document prior
	learning_method_ = Regular;
	topic_upper_bound_ = topic_upper_bound;

	init_z();     // uniform-random topic for every word instance
	init_theta(); // document-topic counts derived from z
	init_beta();  // topic-word counts derived from z

}

/**
 * Implements class constructor for the Stick Breaking
 * based topic learning - Dirichlet hyper parameters of
 * documents (Theta matrix) are generated from the Stick-breaking
 * strategy with parameters (sb_a, sb_b). The hyper parameter for
 * the topic-word distribution (Beta matrix) is symmetric (eta).
 *
 * After storing the configuration it builds the initial sampler state:
 * random topic assignments (z), per-document topic counts (theta counts),
 * and per-topic word counts (beta counts).
 */
TopicLearningGibbs::TopicLearningGibbs(size_t num_topics,
		size_t max_iterations,
		size_t burn_in_period,
		double sb_a,
		double sb_b,
		double eta,
		string data_file,
		string vocab_file,
		size_t topic_upper_bound) : LDAPPMBase(max_iterations,
		data_file,
		vocab_file) {

	// Note: the following steps should follow the same order
	this->eta_ = eta;
	this->num_topics_ = num_topics;
	this->burn_in_period_ = burn_in_period;
	this->num_learned_topics_ = num_topics;
	this->sb_a_ = sb_a;
	this->sb_b_ = sb_b;
	this->alpha_vec_ = sample_stick_breaking_prior(this->num_topics_, sb_a, sb_b); // initial alpha vector
	// Fix: alpha_ was previously left uninitialized by this constructor,
	// but it is read later (run_gibbs() adds alpha_ to the topic counts and
	// calc_model_perplexity() smooths with it) — an uninitialized read.
	// Use the mean of the stick-breaking draw as a well-defined symmetric
	// stand-in; must come after alpha_vec_ is set.
	this->alpha_ = mean(this->alpha_vec_);
	this->learning_method_ = Stick_Breaking;
	this->topic_upper_bound_ = topic_upper_bound;

	this->init_z();
	this->init_theta();
	this->init_beta();

}

/**
 * Destructor. All members (Armadillo containers, std::vector/std::string
 * state) release their resources via their own destructors, so there is
 * nothing to do here.
 */
TopicLearningGibbs::~TopicLearningGibbs() {
}


void TopicLearningGibbs::init_z(){

	this->z_ = zeros<uvec>(this->num_word_instances_);

	for(size_t j = 0; j < this->num_word_instances_; j++)
		this->z_(j) = this->sample_uniform_int(this->num_topics_);

}

void TopicLearningGibbs::init_theta(){

	this->theta_counts_ = zeros(this->topic_upper_bound_, this->num_documents_);

	for (size_t d = 0; d < this->num_documents_; d++) // for each document
		this->theta_counts_.col(d) = calc_partition_counts(
				this->topic_upper_bound_, this->document_word_indices_[d]);

}

void TopicLearningGibbs::init_beta(){

	this->beta_counts_ = zeros(this->topic_upper_bound_, this->vocabulary_size_);

	for(size_t i = 0; i < this->num_word_instances_; i++)
		this->beta_counts_(this->z_(i), this->word_ids_(i)) += 1;

}

/**
 * Saves the Gibbs sampler state to ASCII text files
 * (MATLAB/R compatible). Each artifact goes to
 * "<state_name><suffix>"; the burn-in aggregates are written
 * only when a burn-in period was configured.
 */
void TopicLearningGibbs::save_state(string state_name) {

	// Current sampler state
	this->z_.save(state_name + "_z.dat", raw_ascii);
	this->beta_counts_.save(state_name + "_beta_counts.dat", raw_ascii);
	this->theta_counts_.save(state_name + "_theta_counts.dat", raw_ascii);
	this->beta_sample_.save(state_name + "_beta_samples.dat", raw_ascii);
	this->theta_sample_.save(state_name + "_theta_samples.dat", raw_ascii);

	// Burn-in aggregates (mode of z, averaged samples) exist only
	// when a burn-in period was requested
	if (this->burn_in_period_ > 0) {
		this->z_mode_.save(state_name + "_bp_z.dat", raw_ascii);
		this->beta_sample_bp_.save(state_name + "_bp_beta_samples.dat", raw_ascii);
		this->theta_sample_bp_.save(state_name + "_bp_theta_samples.dat", raw_ascii);
	}

}


/**
 * Counts, for the given word-instance indices (typically one document's
 * words), how many are currently assigned to each topic according to z.
 *
 * @param num_topics    length of the returned count vector
 * @param word_indices  corpus-level indices of the word instances to tally
 * @return vector of size num_topics with per-topic counts
 *
 * Fix: removed the 'register' storage-class specifier — deprecated since
 * C++11 and ill-formed in C++17 — and use size_t for the element count to
 * avoid narrowing from vector::size().
 */
vec TopicLearningGibbs::calc_partition_counts (
		size_t num_topics,
		vector<size_t> word_indices){

	const size_t num_elements = word_indices.size();

	vec partition_counts = zeros<vec>(num_topics);

	for (size_t i = 0; i < num_elements; i++)
		partition_counts(this->z_(word_indices[i])) += 1;

	return partition_counts;
}


/**
 * Main Gibbs-sampling loop with on-line (document-level) topic learning.
 *
 * Per iteration:
 *   1. Every ITER_MOD iterations, one document (doc_idx, cycled in order)
 *      is selected; for that document only, the sampler temporarily opens
 *      the full topic space (all topic_upper_bound_ topics active).
 *   2. For each document: sample theta and beta restricted to the currently
 *      active topics, then resample each word's topic (z) from
 *      theta .* beta(:, word), mapping the result back to original topic ids.
 *   3. After the selected document is sampled, the active-topic set is
 *      reset to exactly the topics that document actually used, and
 *      num_learned_topics_ is updated accordingly.
 *
 * Progress and per-iteration perplexity are written to
 * "run_gibbs_topic_learning.log" and stdout. Corpus-level topic learning
 * and burn-in averaging are present but commented out (see TODO below).
 */
void TopicLearningGibbs::run_gibbs()
{
	vector < size_t > word_indices;
//	size_t bp_count = 0;
//	bool is_burn_in_started = false;
	// flag == false means "a document has been selected this iteration and
	// its full-topic-space learning step has not run yet"; it is set back
	// to true right after that step.
	bool flag = true;
	size_t ITER_MOD = 1; // select a new learning document every ITER_MOD iterations
//	size_t THRESHOLD = 200;
	ofstream log_file;
	size_t doc_idx = 0; // first document
	vec theta_counts;
	vec active_topic_counts;
	vec theta_sample;
	mat beta_sample;
	// active_topics(t) == 1 marks topic t as usable for z sampling
	uvec active_topics = zeros<uvec>(this->topic_upper_bound_);
	size_t num_active_topics = this->topic_upper_bound_;
	rowvec zz_theta = zeros<rowvec>(this->num_documents_); // all-zero row (used by dead code below)
	rowvec zz_beta = zeros<rowvec>(this->vocabulary_size_); // all-zero row (used by dead code below)
	uvec original_topic; // maps compact active-topic index -> original topic id


	log_file.open ("run_gibbs_topic_learning.log");
	log_file << "\nGibbs sampling with topic learning: \n\n";


	// START GIBBS ITERATIONS


	for (size_t iter = 0; iter < this->max_iterations_; iter++){

		log_file << "iter #" << iter + 1;
		cout << "iter #" << iter + 1;

// TODO:
//		Beta counts goes to zero; need to fix this
//		Do allow document level topic learning,
//		if corpus level learning is completed

//		// TOPIC LEARNING -- CORPUS LEVEL (STEP 2)
//
//		if (flag){ // ? flag == true
//
//			vec topic_word_counts = sum(this->beta_counts_, 1); // sum over topic-rows
//			num_active_topics = 0;
//			active_topics.fill(0);
//
//			log_file << "\n    corpus counts: ";
//			for (size_t tt = 0; tt < this->topic_upper_bound_; tt++){
//
//				log_file << topic_word_counts(tt) << " ";
//
//				if (topic_word_counts(tt) > THRESHOLD){
//					active_topics(tt) = 1;
//					num_active_topics++;
//				}
//				else {
//					// updates theta and beta samples
//					// Note that Z vector could be still wrong,
//					// during the next iteration
//					this->beta_counts_.row(tt) = zz_beta;
//					this->theta_counts_.row(tt) = zz_theta;
//				}
//
//			}
//			log_file << endl;
//
//			this->num_learned_topics_ = num_active_topics;
//			log_file << "    active topics # " << num_active_topics << endl;
//
//		} // ? flag == true
//
//		// END TOPIC LEARNING -- CORPUS LEVEL


		// Selects a document for topic learning
		// (cycles 0, 1, ..., num_documents_-1, then wraps back to 0)

		if (iter % ITER_MOD == 0){ //

			if (iter == 0 || doc_idx + 1 == this->num_documents_)
				doc_idx = 0;
			else
				doc_idx++;

			flag = false;

			log_file << "\n    selected doc #" << (doc_idx + 1) << endl;
			cout << "\n  selected doc #" << (doc_idx + 1);
		}



		for (size_t d = 0; d < this->num_documents_; d++){ // for each document

			word_indices = this->document_word_indices_[d];

			// STAGE 1: Sampling for THETA and BETA

			// only the selected document is allowed
			// to sample from the whole topic space
			if (!flag && doc_idx == d){ // restricts to the selected document and once
				active_topics.fill(1);
				num_active_topics = this->topic_upper_bound_;
			}

			// refreshes this document's topic counts from the current z
			this->theta_counts_.col(d) = theta_counts = calc_partition_counts(this->topic_upper_bound_, word_indices);

			// compacts the counts of active topics and records the
			// compact-index -> original-topic-id mapping
			active_topic_counts = zeros<vec>(num_active_topics);
			original_topic = zeros<uvec>(num_active_topics);
			for (size_t di = 0, active_count = 0; di < this->topic_upper_bound_; di++){
				if (active_topics(di)){
					active_topic_counts(active_count) = theta_counts(di); // we only consider active topics for z sampling
					original_topic(active_count) = di; // active topics to original topics mapping
					active_count++;
				}
			}

			// NOTE(review): the scalar alpha_ is used here even under the
			// Stick_Breaking method (alpha_vec_ is not consulted) — confirm
			// this is intended.
			theta_sample = sample_dirichlet_col_vec(num_active_topics, active_topic_counts + this->alpha_); // samples w/ all topics

			// one Dirichlet draw per active topic over the vocabulary
			beta_sample = zeros<mat>(num_active_topics, this->vocabulary_size_);
			for(size_t k = 0, cnt = 0; k < this->topic_upper_bound_; k++){
				if (active_topics(k)){
					beta_sample.row(cnt) = sample_dirichlet_row_vec(this->vocabulary_size_, this->beta_counts_.row(k) + this->eta_);
					cnt++;
				}
			}

			// STAGE 2: Sampling for TOPICS (Z)

			// Excludes the document d's word-topic counts
			// NOTE(review): the decrement is guarded by active_topics but the
			// re-increment below is unconditional; a word whose old topic is
			// inactive is not decremented yet still re-added under its new
			// topic, so beta_counts_ totals can drift (likely related to the
			// "Beta counts goes to zero" TODO above) — verify.
			for(size_t i = 0; i < this->document_lengths_[d]; i++)
				if (active_topics(this->z_(word_indices[i])))
					this->beta_counts_(this->z_(word_indices[i]), this->word_ids_(word_indices[i])) -= 1;

			//  Samples Zs (word topic selection)
			// multinomial over active topics, mapped back to original ids

			for(size_t i = 0; i < this->document_lengths_[d]; i++)
				this->z_(word_indices[i]) = original_topic(
						sample_multinomial(theta_sample % beta_sample.col(this->word_ids_(word_indices[i]))));

			// updates beta counts
			for(size_t i = 0; i < this->document_lengths_[d]; i++)
				this->beta_counts_(this->z_(word_indices[i]), this->word_ids_(word_indices[i])) += 1;



			// TOPIC LEARNING -- DOCUMENT LEVEL (STEP 1)
			// shrink the active-topic set to the topics the selected
			// document actually used after resampling

			if (!flag && doc_idx == d){ // restricts to the selected document and once

				flag = true;
				theta_counts = calc_partition_counts(this->topic_upper_bound_, word_indices);
				num_active_topics = 0;
				active_topics.fill(0);

				log_file << "    doc counts: ";
				for (size_t tt = 0; tt < this->topic_upper_bound_; tt++){
					log_file << theta_counts(tt) << " ";
					if (theta_counts(tt) > 0){
						active_topics(tt) = 1;
						num_active_topics++;
					}
				}
				log_file << endl;

				this->num_learned_topics_ = num_active_topics;

			}

			// END TOPIC LEARNING -- DOCUMENT LEVEL


		} // END for each document


		double mdl_perplexity = calc_model_perplexity();

		log_file << "    active topics # " << this->num_learned_topics_ << endl
				<< "    perplexity: " << mdl_perplexity << endl;
		log_file.flush();
		cout << " active topics # " << this->num_learned_topics_
				<< " perplexity: " << mdl_perplexity << endl;



//		// Handles burn in period
//
//		if (this->burn_in_period_ > 0 && iter > this->burn_in_period_ - 1){
//
//			if (!is_burn_in_started){ // Initializes data structures for the first time
//				this->z_bp_ = zeros<umat>(this->num_word_instances_, (this->max_iterations_ - this->burn_in_period_));
//				this->z_mode_ = zeros<uvec>(this->num_word_instances_);
//
//				this->z_bp_.col(bp_count) = this->z_;
//				this->beta_sample_bp_ = this->beta_sample_;
//				this->theta_sample_bp_ = this->theta_sample_;
//				is_burn_in_started = true; // burn in period is started
//			}
//			else {
//				this->z_bp_.col(bp_count) = this->z_;
//				this->beta_sample_bp_ += this->beta_sample_;
//				this->theta_sample_bp_ += this->theta_sample_;
//			}
//
//			bp_count++;
//		}

	}

	log_file.close();

	// END GIBBS ITERATIONS

//	// Averages for burn in period
//	if (is_burn_in_started){
//		this->beta_sample_bp_ /= bp_count;
//		this->theta_sample_bp_ /= bp_count;
//		this->z_mode_ = find_mode(this->z_bp_);
//	}

}




double TopicLearningGibbs::calc_model_perplexity()
{
	double perplexity, ln_likelihood = 0, p1, p2, Z, prob_wd;
	vec partition_counts = zeros<vec>(this->topic_upper_bound_);

	// Calculates number of words assigned to each topic
	for (size_t t = 0; t < this->topic_upper_bound_; t++)
		partition_counts(t) = accu(this->theta_counts_.row(t)) + this->eta_;

	partition_counts += 1e-24;

	for (size_t i = 0; i < this->num_word_instances_; i++) {
		Z = prob_wd = 0;
		for (size_t t = 0; t < this->topic_upper_bound_; t++) {
			p1 = this->beta_counts_(t, this->word_ids_(i)) + this->eta_;
			p2 = this->theta_counts_(t, this->document_ids_(i)) + this->alpha_;
			Z += p2;
			prob_wd += p1 * p2 / partition_counts(t);
		}

		ln_likelihood += log(prob_wd / Z);
	}

	perplexity = exp(-ln_likelihood / this->num_word_instances_);

	return perplexity;
}

/*
 * This function calculates partition probability for
 * a document.
 *
 * Ref: LDA production partition model by George Casella
 *
 */
long double TopicLearningGibbs::calc_log_partition_probality() {

	long double partition_probability = 0.0;

	// Calculate partition counts from  m_ji' s; i' = 1 ... V
	vec partition_counts = sum(this->beta_counts_, 1); // sums over rows

	// ln_gamma (n_j * alpha_j + 1)
	vec ln_gamma_j = log_gamma_vec(partition_counts + this->alpha_);

	// ln_gamma (\sum_j n_j * alpha_j + K)
	long double ln_gamma_K = log_gamma(accu(partition_counts + this->alpha_));

	// ln a_j = \sum_i' (m_ji' * ln beta_ji')
	vec ln_a_j = sum(this->beta_counts_ % log(this->beta_sample_), 1); // sums over rows i.e. over i' s

	partition_probability = accu(ln_gamma_j + ln_a_j - ln_gamma_K); // sum over all j s

	return partition_probability;
}








