package com.kd.mining.lda.core;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import com.kd.mining.lda.core.Documents.Document;
import com.kd.utils.FileUtil;

/**
 * Trains a Latent Dirichlet Allocation (LDA) topic model with collapsed
 * Gibbs sampling.
 *
 * <p>Usage: construct with a {@link ModelParameter}, call
 * {@link #init(Documents)} once, then {@link #train(Documents, String)}.
 * At each save point the model writes {@code .params}, {@code .phi},
 * {@code .theta}, {@code .tassign} and {@code .twords} files.
 *
 * <p>Not thread-safe: all state is mutated in place during sampling.
 *
 * @author Administrator
 */
public class LDAModel {

	// word index array: doc[m][n] = vocabulary index of the n-th token of document m
	private int [][] doc;
	// vocabulary size, topic number, document number
	private int V, K, M;
	// topic label array: z[m][n] = topic currently assigned to token n of document m
	private int [][] z;
	// doc-topic dirichlet prior parameter
	private float alpha;
	// topic-word dirichlet prior parameter
	private float beta; 
	// nmk[m][k] = number of tokens in document m assigned to topic k. M*K
	private int [][] nmk;
	// nkt[k][t] = number of times term t is assigned to topic k. K*V
	private int [][] nkt;
	// Sum for each row in nmk (token count per document)
	private int [] nmkSum;
	// Sum for each row in nkt (token count per topic)
	private int [] nktSum;
	// Parameters for topic-word distribution K*V
	private double [][] phi;
	// Parameters for doc-topic distribution M*K
	private double [][] theta;
	// Times of Gibbs sampling iterations
	private int iterations;
	// The number of iterations between two savings
	private int saveStep;
	// Begin saving the model at this iteration
	private int beginSaveIters;
	
	/**
	 * Creates an untrained model from the given hyper-parameters and
	 * training schedule. Call {@link #init(Documents)} before training.
	 *
	 * @param param hyper-parameters (alpha, beta, topic count) and schedule
	 *              (iterations, saveStep, beginSaveIters)
	 */
	public LDAModel(ModelParameter param) {
		this.alpha = param.getAlpha();
		this.beta = param.getBeta();
		this.iterations = param.getIteration();
		this.saveStep = param.getSaveStep();
		this.beginSaveIters = param.getBeginSaveIters();
		this.K = param.getTopicNum();
	}
	
	/**
	 * Allocates all count matrices and assigns a uniformly random initial
	 * topic to every token, updating the Gibbs counters accordingly.
	 *
	 * @param docSet corpus providing the documents and the index-to-term map
	 */
	public void init(Documents docSet) {
		List<Document> docs = docSet.getDocs();
		M = docs.size();
		V = docSet.getIndexToTerm().size();
		phi = new double[K][V];
		theta = new double[M][K];
		// initialize word index array for the document set
		doc = new int[M][];
		for(int m=0; m<M; m++) {
			// defensive copy so later mutation of the Document does not corrupt training state
			doc[m] = docs.get(m).getTerms().clone();
		}
		// random initial topic assignment for z (topic label array)
		z = new int[M][];
		nmk = new int[M][K];
		nkt = new int[K][V];
		nmkSum = new int[M];
		nktSum = new int[K];
		for(int m=0; m<M; m++) {
			int N = doc[m].length;
			z[m] = new int[N];
			for(int n=0; n<N; n++) {
				// Math.random() < 1.0, so the cast always lands in [0, K-1];
				// no clamping is needed
				int initTopic = (int) (Math.random() * K);
				z[m][n] = initTopic;
				nmk[m][initTopic]++;
				nkt[initTopic][doc[m][n]]++;
				nktSum[initTopic]++;
			}
			nmkSum[m] = N;
		}
	}
	
	/**
	 * Runs the Gibbs sampling loop, re-sampling every token's topic each
	 * iteration, and persists the model at the configured save points.
	 *
	 * @param docSet corpus (needed at save points to resolve term strings)
	 * @param resultDir classpath-relative directory to write model files into
	 * @throws IOException if a save point fails to write
	 * @throws IllegalArgumentException if the schedule allows no save point
	 *         (iterations &lt; saveStep + beginSaveIters)
	 */
	public void train(Documents docSet, String resultDir) throws IOException {
		if(iterations < (saveStep+beginSaveIters)) {
			throw new IllegalArgumentException("iterations must be >= saveStep + beginSaveIters ("
					+ (saveStep+beginSaveIters) + "), but was " + iterations);
		}
		// inclusive upper bound so a save point may fall on the final iteration
		for(int i=0; i<=iterations; i++) {
			System.out.println("iteration: " + i + " times start...");
			// estimate and back up the model at each save point
			if(i>=beginSaveIters && 0==(i-beginSaveIters)%saveStep) {
				System.out.println("start estimate and backup a time.");
				estimate();
				savePoint(i, docSet, resultDir);
				System.out.println("finish to estimate and backup a time.");
			}
			// one full Gibbs sweep: re-sample the topic of every token
			for(int m=0; m<M; m++) {
				int N = doc[m].length;
				for(int n=0; n<N; n++) {
					z[m][n] = sampleTopic(m, n);
				}
			}
		}
	}
	
	/**
	 * Draws a new topic for token n of document m from the collapsed Gibbs
	 * conditional p(z_i | z_-i, w), updating all counters in place.
	 *
	 * @param m document index
	 * @param n token position within document m
	 * @return the newly sampled topic in [0, K)
	 */
	public int sampleTopic(int m, int n) {
		// Remove the current topic label of w_{m,n} from the counters
		int oldTopic = z[m][n];
		nmk[m][oldTopic]--;
		nkt[oldTopic][doc[m][n]]--;
		nmkSum[m]--;
		nktSum[oldTopic]--;
		
		// Unnormalized conditional: p(z=k) ∝ p(w|k) * p(k|d)
		double[] p = new double[K];
		for(int k=0; k<K; k++) {
			p[k] = ((nkt[k][doc[m][n]]+beta)/(nktSum[k] + V*beta))*((nmk[m][k]+alpha)/(nmkSum[m]+K*alpha));
		}
		// Turn p into a cumulative distribution and sample from it
		for(int k=1; k<K; k++) {
			p[k] += p[k-1];
		}
		double u = Math.random()*p[K-1];
		int newTopic;
		// u < p[K-1] always holds (Math.random() < 1), so the loop breaks before K
		for(newTopic=0; newTopic<K; newTopic++) {
			if(u < p[newTopic]) {
				break;
			}
		}
		// Add the new topic label back into the counters
		nmk[m][newTopic]++;
		nkt[newTopic][doc[m][n]]++;
		nmkSum[m]++;
		nktSum[newTopic]++;
		return newTopic;
	}
	
	/**
	 * Recomputes phi (topic-word) and theta (doc-topic) from the current
	 * Gibbs counters using the smoothed (Dirichlet-posterior mean) estimates.
	 */
	public void estimate() {
		// topic-word distribution
		for(int k=0; k<K; k++) {
			for(int v=0; v<V; v++) {
				phi[k][v] = (nkt[k][v]+beta)/(nktSum[k]+V*beta);
			}
		}
		// document-topic distribution
		for(int m=0; m<M; m++) {
			for(int k=0; k<K; k++) {
				theta[m][k] = (nmk[m][k]+alpha)/(nmkSum[m]+K*alpha);
			}
		}
	}
	
	/**
	 * Writes a snapshot of the model ({@code .params}, {@code .phi},
	 * {@code .theta}, {@code .tassign}, {@code .twords}) to the result
	 * directory, named {@code lda_<savePoint>.*}.
	 *
	 * @param savePoint iteration number used in the file names
	 * @param docSet corpus, used to resolve term indices to strings
	 * @param resultDir classpath-relative output directory
	 * @throws IOException if the directory is missing from the classpath or a write fails
	 */
	public void savePoint(int savePoint, Documents docSet, String resultDir) throws IOException {
		// fail fast with a clear message instead of an NPE when the directory is absent
		java.net.URL resultUrl = LDAModel.class.getClassLoader().getResource(resultDir);
		if(resultUrl == null) {
			throw new IOException("result directory not found on classpath: " + resultDir);
		}
		String resultPath = resultUrl.getPath();
		String modelName = "lda_" + savePoint;
		
		//lda.params: hyper-parameters and schedule
		ArrayList<String> lines = new ArrayList<String>();
		lines.add("alpha = " + alpha);
		lines.add("beta = " + beta);
		lines.add("topicNum = " + K);
		lines.add("docNum = " + M);
		lines.add("termNum = " + V);
		lines.add("iterations = " + iterations);
		lines.add("saveStep = " + saveStep);
		lines.add("beginSaveIters = " + beginSaveIters);
		FileUtil.writeLines(resultPath + modelName + ".params", lines);
		
		//lda.phi K*V and lda.theta M*K
		writeMatrix(resultPath + modelName + ".phi", phi);
		writeMatrix(resultPath + modelName + ".theta", theta);
		
		//lda.tassign: "termIndex:topic" per token
		BufferedWriter writer = new BufferedWriter(new FileWriter(resultPath + modelName + ".tassign"));
		try {
			for(int m = 0; m < M; m++){
				for(int n = 0; n < doc[m].length; n++){
					writer.write(doc[m][n] + ":" + z[m][n] + "\t");
				}
				writer.write("\n");
			}
		} finally {
			writer.close();
		}
		
		//lda.twords: top words of each topic, ranked by phi[k][v]
		writer = new BufferedWriter(new FileWriter(resultPath + modelName + ".twords"));
		try {
			// cap at the vocabulary size so small vocabularies do not overrun the index list
			int topNum = Math.min(20, V);
			for(int k = 0; k < K; k++){
				List<Integer> tWordsIndexArray = new ArrayList<Integer>(V);
				for(int v = 0; v < V; v++){
					tWordsIndexArray.add(Integer.valueOf(v));
				}
				Collections.sort(tWordsIndexArray, new TwordsComparable(phi[k]));
				writer.write("topic " + k + "\t:\t");
				for(int t = 0; t < topNum; t++){
					int termIndex = tWordsIndexArray.get(t);
					writer.write(docSet.getIndexToTerm().get(termIndex) + " " + phi[k][termIndex] + "\t");
				}
				writer.write("\n");
			}
		} finally {
			writer.close();
		}
	}
	
	/**
	 * Writes a matrix to the given path, one row per line, values separated
	 * by tabs; the writer is closed even if a write fails.
	 */
	private void writeMatrix(String path, double[][] matrix) throws IOException {
		BufferedWriter writer = new BufferedWriter(new FileWriter(path));
		try {
			for(int i = 0; i < matrix.length; i++){
				for(int j = 0; j < matrix[i].length; j++){
					writer.write(matrix[i][j] + "\t");
				}
				writer.write("\n");
			}
		} finally {
			writer.close();
		}
	}
	
	/**
	 * Orders word indices by descending probability within one topic,
	 * so the most probable words sort first.
	 */
	class TwordsComparable implements Comparator<Integer> {
		
		public double [] sortProb; // Probability of each word in topic k
		
		public TwordsComparable (double[] sortProb){
			this.sortProb = sortProb;
		}

		public int compare(Integer o1, Integer o2) {
			// descending: higher probability sorts earlier
			return Double.compare(sortProb[o2], sortProb[o1]);
		}
	}
}
