package edu.isi.pedwork.topic;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.LineNumberReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.TreeMap;
import java.io.Serializable;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;

import edu.isi.pedwork.Configure;
import edu.isi.pedwork.data.Pair;

/**
 * Labeled LDA topic model (one topic per distinct label) trained and applied
 * with collapsed Gibbs sampling. In LEARNING mode it estimates topic-word
 * counts from labeled documents; in INFERENCE mode a serialized model is
 * loaded and topic assignments are sampled for new documents with the learned
 * counts held fixed. Transient fields are per-run state rebuilt from the
 * input data after deserialization.
 */
public class LabelLDAModel implements Serializable {
	// Three-decimal formatter for probabilities written to the database.
	// NOTE(review): DecimalFormat is not thread-safe; fine for the current
	// single-threaded use, but confirm before sharing across threads.
	static DecimalFormat Sformat = new DecimalFormat(".###");

	/** Operating mode: estimate a new model, or infer with a loaded one. */
	public enum Type {
		LEARNING, INFERENCE
	}

	Type type;
	// Bidirectional id<->string maps for terms and labels, grown while loading.
	Hashtable<Integer, String> idxTerm = new Hashtable<Integer, String>();
	Hashtable<Integer, String> idxLabel = new Hashtable<Integer, String>();
	Hashtable<String, Integer> labelIdMap = new Hashtable<String, Integer>();
	Hashtable<String, Integer> termIdMap = new Hashtable<String, Integer>();

	// Per-run state, deliberately not persisted with the model.
	transient int[][] documents; // documents[d][n] = term id of token n in doc d
	transient int numstats; // number of theta/phi samples accumulated so far
	transient int[][] L; // L[d][k] = 1 iff document d carries label k
	transient double[] Lsum; // Lsum[d] = number of labels on document d
	transient int D; // number of documents in the current corpus
	transient Hashtable<Integer, String> idxDoc = null; // doc index -> external doc id

	int V; // vocabulary size
	int K; // number of topics (== number of labels)

	double alpha = 2.0; // symmetric document-topic Dirichlet prior
	double beta = 0.2; // symmetric topic-word Dirichlet prior
	int z[][]; // z[doc][word] = topic
	int[][] nw; // nw[word][topic] number of instances of word i (term?)
	// assigned to topic j.
	int[][] nd; // na[doc][topic] number of words in document i assigned to
	// topic j.
	int[] nwsum; // nwsum[j] total number of words assigned to topic j.
	int[] ndsum; // ndsum[i] total number of topic in document i.
	double[][] thetasum;// cumulative statistics of theta: doc-topic
	double[][] phisum;// cumulative statistics of phi: word-topic

	// Held-out (inference-time) counterparts of the count tables above.
	int newz[][];
	int[][] newnw;
	int[][] newnd;
	int[] newnwsum;
	int[] newndsum;
	double[][] newthetasum;
	double[][] newphisum;

	// Gibbs sampler schedule. These are mutable shared globals: init() and
	// pedworkRun() override BURN_IN/SAMPLE_LAG for inference runs.
	public static int BURN_IN = 200; // sweeps discarded before collecting stats
	public static int ITERATIONS = 2000; // total Gibbs sweeps
	public static int SAMPLE_LAG = 50; // sweeps between accumulated samples
	public static int NBestWords = 20; // top terms per topic in output_phi()

	/**
	 * @param inputtype whether this instance will learn a new model or run
	 *                  inference with a previously learned one
	 */
	public LabelLDAModel(Type inputtype) {
		this.type = inputtype;
	}

	/**
	 * Reads a held-out document file and builds the document/label matrices
	 * against the model's existing vocabulary.
	 *
	 * @param docPath  tab-separated input file (docId, labels, terms)
	 * @param docCol   unused; column 0 is assumed to be the document id
	 * @param labelCol column index of the space-separated label list
	 * @param termCol  column index of the space-separated term list
	 */
	public void load_inference_data(String docPath, int docCol, int labelCol,
			int termCol) throws Exception {
		// Parse then index in a single step.
		construct_document_matrix(load_document_file(docPath, labelCol, termCol));
	}

	/**
	 * Builds the document/label matrices from rows that were already parsed
	 * elsewhere (each row is {docId[], labels[], terms[]}).
	 *
	 * NOTE(review): docCol, labelCol and termCol are accepted but unused —
	 * the rows are assumed to already be in (docId, labels, terms) order;
	 * confirm against callers before relying on the column arguments.
	 */
	public void load_inference_data_stream(
			LinkedList<ArrayList<String[]>> lines, int docCol, int labelCol,
			int termCol) throws Exception {

		construct_document_matrix(lines);
	}

	/**
	 * Loads the training corpus: builds the term and label vocabularies from
	 * the input file, sets V and K accordingly, then constructs the document
	 * matrix.
	 *
	 * @param inputfile tab-separated input file (docId, labels, terms)
	 * @param docCol    unused; column 0 is assumed to be the document id
	 * @param labelCol  column index of the space-separated label list
	 * @param termCol   column index of the space-separated term list
	 * @throws Exception if the file cannot be read or parsed
	 */
	public void load_learning_data(String inputfile, int docCol, int labelCol,
			int termCol) throws Exception {
		LinkedList<ArrayList<String[]>> lines = load_document_file(inputfile,
				labelCol, termCol);
		// Iterate instead of lines.get(d): indexed access on a LinkedList is
		// O(n) per call, which made the original loop quadratic in corpus size.
		for (ArrayList<String[]> curLine : lines) {
			String[] labels = curLine.get(1);
			String[] terms = curLine.get(2);
			for (String term : terms) {
				if (!termIdMap.containsKey(term)) {
					termIdMap.put(term, termIdMap.size());
					idxTerm.put(idxTerm.size(), term);
				}
			}
			// An empty label column is encoded as a single "" token; such
			// rows contribute no labels.
			if (!labels[0].equals("")) {
				for (String label : labels) {
					if (!labelIdMap.containsKey(label)) {
						labelIdMap.put(label, labelIdMap.size());
						idxLabel.put(idxLabel.size(), label);
					}
				}
			}
		}
		// Vocabulary size
		this.V = termIdMap.size();
		// Num of topics is equal to the number of labels
		this.K = labelIdMap.size();
		System.out.println("Finish loading vocabulary and label set");
		for (int i = 0; i < idxLabel.size(); i++)
			System.out.print(idxLabel.get(i) + ",");
		System.out.println();
		construct_document_matrix(lines);
	}

	/**
	 * Parses a tab-separated document file into rows of
	 * {docId[], labels[], terms[]}. Blank lines are skipped; labels and terms
	 * are space-separated within their columns.
	 *
	 * @param inputfile path of the file to read
	 * @param labelCol  column index of the label list
	 * @param termCol   column index of the term list
	 * @return one 3-element row per non-blank input line
	 * @throws Exception if the file cannot be read
	 */
	private LinkedList<ArrayList<String[]>> load_document_file(
			String inputfile, int labelCol, int termCol) throws Exception {
		LinkedList<ArrayList<String[]>> lines = new LinkedList<ArrayList<String[]>>();
		LineNumberReader lr = new LineNumberReader(new FileReader(inputfile));
		try {
			String tmpLine;
			while ((tmpLine = lr.readLine()) != null) {
				tmpLine = tmpLine.trim();
				if (tmpLine.length() == 0)
					continue;
				ArrayList<String[]> line = new ArrayList<String[]>(3);
				String[] tokens = tmpLine.split("\t");
				// Column 0 is always treated as the document id.
				line.add(new String[] { tokens[0] });
				line.add(tokens[labelCol].trim().split(" "));
				line.add(tokens[termCol].trim().split(" "));
				lines.add(line);
			}
		} finally {
			lr.close(); // the original never closed the reader (resource leak)
		}
		return lines;
	}

	/**
	 * Builds the integer document matrix, the per-document label indicator
	 * matrix L and the label counts Lsum from parsed (docId, labels, terms)
	 * rows. Terms absent from the vocabulary are dropped — this matters in
	 * inference mode, where the vocabulary is fixed by the loaded model.
	 */
	private void construct_document_matrix(LinkedList<ArrayList<String[]>> lines) {
		this.idxDoc = new Hashtable<Integer, String>();
		this.D = lines.size();
		this.documents = new int[this.D][];
		this.L = new int[D][this.K];
		this.Lsum = new double[this.D];

		// Iterate instead of lines.get(d): indexed access on a LinkedList is
		// O(n) per call, which made the original loop quadratic.
		int d = 0;
		for (ArrayList<String[]> curLine : lines) {
			String[] labels = curLine.get(1);
			String[] terms = curLine.get(2);
			idxDoc.put(d, curLine.get(0)[0]);
			// First count the in-vocabulary terms so the row can be sized exactly.
			int length = 0;
			for (String term : terms)
				if (termIdMap.containsKey(term))
					length++;
			this.documents[d] = new int[length];
			int j = 0;
			for (String term : terms)
				if (termIdMap.containsKey(term))
					this.documents[d][j++] = termIdMap.get(term);
			double lsum = 0;
			if (labels[0].equals("")) {
				// Unlabeled documents ("" label column) may use every topic.
				lsum = this.K;
				for (int i = 0; i < this.idxLabel.size(); i++)
					this.L[d][i] = 1;
			} else {
				for (String label : labels) {
					this.L[d][labelIdMap.get(label)] = 1;
					lsum++;
				}
			}
			this.Lsum[d] = lsum;
			d++;
		}
	}

	/**
	 * Randomly assigns an initial topic to every token and fills the count
	 * tables (nw, nd, nwsum, ndsum) the Gibbs sampler maintains.
	 */
	private void learning_initial_state() {
		int docCount = documents.length;
		nw = new int[V][K]; // term-topic counts
		nd = new int[docCount][K]; // document-topic counts
		nwsum = new int[K]; // tokens assigned to each topic
		ndsum = new int[docCount]; // tokens in each document
		z = new int[docCount][]; // per-token topic assignments
		for (int doc = 0; doc < docCount; doc++) {
			int len = documents[doc].length;
			z[doc] = new int[len];
			for (int pos = 0; pos < len; pos++) {
				// Uniform random topic in [0, K).
				int topic = (int) (Math.random() * K);
				z[doc][pos] = topic;
				nw[documents[doc][pos]][topic]++;
				nd[doc][topic]++;
				nwsum[topic]++;
			}
			ndsum[doc] = len; // document length never changes
		}
	}

	/**
	 * Randomly initializes the held-out count tables (newnw, newnd, newnwsum,
	 * newndsum, newz) before inference-time Gibbs sampling.
	 */
	private void inference_initial_state() {
		int docCount = documents.length;
		newnw = new int[V][K]; // held-out term-topic counts
		newnd = new int[docCount][K]; // held-out document-topic counts
		newnwsum = new int[K]; // held-out tokens per topic
		newndsum = new int[docCount]; // tokens per held-out document
		newz = new int[docCount][]; // held-out per-token assignments
		for (int doc = 0; doc < docCount; doc++) {
			int len = documents[doc].length;
			newz[doc] = new int[len];
			for (int pos = 0; pos < len; pos++) {
				// Uniform random topic in [0, K).
				int topic = (int) (Math.random() * K);
				newz[doc][pos] = topic;
				newnw[documents[doc][pos]][topic]++;
				newnd[doc][topic]++;
				newnwsum[topic]++;
			}
			newndsum[doc] = len;
		}
	}

	/**
	 * Renders the current hyper-parameters and corpus dimensions for logging.
	 *
	 * @return a multi-line, human-readable settings summary
	 */
	private String print_model_settings() {
		// StringBuilder instead of repeated String concatenation; the static
		// schedule fields are read directly (the original accessed them via
		// `this`, obscuring that they are shared globals).
		StringBuilder out = new StringBuilder();
		out.append("Model type: ").append(this.type.toString()).append("\n");
		out.append("Num of Burn-in Iteration: ").append(BURN_IN).append("\n");
		out.append("Num of Total Iteration: ").append(ITERATIONS).append("\n");
		out.append("Sample Lag: ").append(SAMPLE_LAG).append("\n");
		out.append("Num of docs: ").append(this.D).append("\n");
		out.append("Num of topics: ").append(this.K).append("\n");
		out.append("Vocabulary size: ").append(this.V).append("\n");
		out.append("Alpha: ").append(this.alpha).append("\n");
		out.append("Beta: ").append(this.beta).append("\n");
		return out.toString();
	}

	/**
	 * Runs collapsed Gibbs sampling for ITERATIONS sweeps over the training
	 * corpus, accumulating theta/phi statistics every SAMPLE_LAG sweeps once
	 * past burn-in.
	 */
	public void learning() {
		System.out.println(print_model_settings());
		if (SAMPLE_LAG > 0) {
			thetasum = new double[documents.length][K];
			phisum = new double[K][V];
			numstats = 0;
		}
		learning_initial_state();
		for (int iter = 0; iter < ITERATIONS; iter++) {
			if (iter % 100 == 0)
				System.out.println("Sampling " + iter + " of " + ITERATIONS
						+ " iterations");
			// One full sweep: resample every token's topic in place.
			for (int doc = 0; doc < z.length; doc++)
				for (int pos = 0; pos < z[doc].length; pos++)
					z[doc][pos] = learning_sample(doc, pos);
			boolean pastBurnIn = iter > BURN_IN;
			if (pastBurnIn && SAMPLE_LAG > 0 && iter % SAMPLE_LAG == 0)
				learning_theta_phi();
		}
	}

	/**
	 * Runs held-out Gibbs sampling with the learned counts fixed, accumulating
	 * theta samples and a perplexity-style score every SAMPLE_LAG sweeps after
	 * burn-in, then prints the mean score.
	 */
	public void inference() {
		System.out.println(print_model_settings());
		int nsamples = 0;
		double score = 0;
		if (SAMPLE_LAG > 0) {
			newthetasum = new double[documents.length][K];
			newphisum = new double[K][V];
			numstats = 0;
		}
		inference_initial_state();
		for (int i = 0; i < ITERATIONS; i++) {
			if (i % 100 == 0)
				System.out.println("Held-out " + i + " of " + ITERATIONS
						+ " iterations");
			for (int m = 0; m < newz.length; m++) {
				for (int n = 0; n < newz[m].length; n++) {
					newz[m][n] = inference_sample(m, n);
				}
			}
			if ((i >= BURN_IN) && (SAMPLE_LAG > 0) && (i % SAMPLE_LAG == 0)) {
				inference_theta();
				score += perplexity();
				nsamples += 1;
			}
		}
		// Guard against ITERATIONS <= BURN_IN: the original divided by zero
		// here and silently printed NaN.
		if (nsamples == 0) {
			System.out.println("No held-out samples collected"
					+ " (is ITERATIONS <= BURN_IN?)");
			return;
		}
		score = score / nsamples;
		System.out.println("mean held-out score = " + score + " from "
				+ nsamples + " samples");
	}

	/**
	 * Same held-out Gibbs sampling as inference(), but only accumulates the
	 * theta statistics and never scores perplexity (used by pedworkRun()).
	 * The dead score/nsamples locals and commented-out scoring code from the
	 * original have been removed.
	 */
	public void inference_no_perplexity() {
		System.out.println(print_model_settings());
		if (SAMPLE_LAG > 0) {
			newthetasum = new double[documents.length][K];
			newphisum = new double[K][V];
			numstats = 0;
		}
		inference_initial_state();
		for (int i = 0; i < ITERATIONS; i++) {
			if (i % 100 == 0)
				System.out.println("Held-out " + i + " of " + ITERATIONS
						+ " iterations");
			for (int m = 0; m < newz.length; m++) {
				for (int n = 0; n < newz[m].length; n++) {
					newz[m][n] = inference_sample(m, n);
				}
			}
			if ((i >= BURN_IN) && (SAMPLE_LAG > 0) && (i % SAMPLE_LAG == 0))
				inference_theta();
		}
	}

	/**
	 * Resamples the topic of token n in document m from its full conditional,
	 * restricted by the document's label set (L[m][k] zeroes the prior for
	 * topics outside the label set). Made consistent with inference_sample():
	 * the term id is cached and V*beta hoisted; the arithmetic is unchanged.
	 *
	 * @return the newly sampled topic index
	 */
	private int learning_sample(int m, int n) {
		int w = documents[m][n]; // cache the term id once
		int topic = z[m][n];
		// Remove the current assignment from all count tables.
		nw[w][topic]--;
		nd[m][topic]--;
		nwsum[topic]--;
		ndsum[m]--;
		double Vbeta = V * beta;
		double[] p = new double[K];
		for (int k = 0; k < K; k++) {
			p[k] = (nw[w][k] + beta) / (nwsum[k] + Vbeta)
					* (nd[m][k] + alpha * L[m][k])
					/ (ndsum[m] + alpha * Lsum[m]);
		}
		// Cumulate and draw from the unnormalized multinomial.
		for (int k = 1; k < p.length; k++)
			p[k] += p[k - 1];
		double u = Math.random() * p[K - 1];
		for (topic = 0; topic < p.length; topic++) {
			if (u < p[topic])
				break;
		}
		// Add the new assignment back.
		nw[w][topic]++;
		nd[m][topic]++;
		nwsum[topic]++;
		ndsum[m]++;
		return topic;
	}

	/**
	 * Resamples the topic of held-out token n in document m, conditioning on
	 * the learned counts (nw, nwsum) plus the held-out counts (newnw, ...),
	 * restricted by the document's label set.
	 *
	 * @return the newly sampled topic index
	 */
	private int inference_sample(int m, int n) {
		int w = documents[m][n];
		int topic = newz[m][n];
		// Take the current assignment out of the held-out counts.
		newnw[w][topic]--;
		newnd[m][topic]--;
		newnwsum[topic]--;
		newndsum[m]--;
		double Vbeta = V * beta;
		// Build the cumulative (unnormalized) conditional in a single pass.
		double[] cdf = new double[K];
		for (int k = 0; k < K; k++) {
			cdf[k] = (nw[w][k] + newnw[w][k] + beta)
					/ (nwsum[k] + newnwsum[k] + Vbeta)
					* (newnd[m][k] + alpha * L[m][k])
					/ (newndsum[m] + alpha * Lsum[m]);
			if (k > 0)
				cdf[k] += cdf[k - 1];
		}
		double u = Math.random() * cdf[K - 1];
		for (topic = 0; topic < cdf.length; topic++)
			if (u < cdf[topic])
				break;
		// Record the fresh assignment.
		newnw[w][topic]++;
		newnd[m][topic]++;
		newnwsum[topic]++;
		newndsum[m]++;
		return topic;
	}

	/**
	 * Accumulates one sample of the held-out document-topic distribution into
	 * newthetasum and bumps the sample counter.
	 */
	private void inference_theta() {
		int docCount = documents.length;
		for (int m = 0; m < docCount; m++) {
			double denom = newndsum[m] + alpha * Lsum[m]; // constant per doc
			for (int k = 0; k < K; k++)
				newthetasum[m][k] += (newnd[m][k] + alpha * L[m][k]) / denom;
		}
		numstats++;
	}

	/**
	 * Accumulates one sample of theta (doc-topic) and phi (topic-word) from
	 * the current count tables.
	 */
	private void learning_theta_phi() {
		for (int m = 0; m < documents.length; m++) {
			double docDenom = ndsum[m] + alpha * Lsum[m]; // constant per doc
			for (int k = 0; k < K; k++)
				thetasum[m][k] += (nd[m][k] + alpha * L[m][k]) / docDenom;
		}
		double VBeta = this.V * this.beta;
		for (int k = 0; k < K; k++) {
			double topicDenom = nwsum[k] + VBeta; // constant per topic
			for (int w = 0; w < V; w++)
				phisum[k][w] += (nw[w][k] + beta) / topicDenom;
		}
		numstats++;
	}

	// Shared JDBC handles used by update_theta(); opened by init_db() and
	// released by close_db().
	static Connection conn = null;
	static Statement stmt = null;

	/**
	 * Opens the database connection configured in Configure.dbPath.
	 *
	 * @throws SQLException if the connection cannot be established
	 */
	public static void init_db() throws SQLException {
		conn = DriverManager.getConnection(Configure.dbPath);
		stmt = conn.createStatement();
	}

	/**
	 * Closes the shared statement and connection. Null-safe, and the
	 * connection is closed even when closing the statement throws — the
	 * original leaked the connection in that case.
	 *
	 * @throws SQLException if closing either resource fails
	 */
	public void close_db() throws SQLException {
		try {
			if (stmt != null)
				stmt.close();
		} finally {
			if (conn != null)
				conn.close();
		}
	}

	/**
	 * Normalizes the sampled held-out theta and inserts one row per document
	 * into the user_theta table, mapping each label's probability to its named
	 * column. Labels outside the fixed column list are silently skipped, as in
	 * the original.
	 *
	 * @param subwikiid currently unused; kept for interface compatibility
	 * @throws Exception on database or id-parsing failure
	 */
	public void update_theta(String subwikiid) throws Exception {
		init_db();
		PreparedStatement INSERT = conn
				.prepareStatement("insert into user_theta (userId,Art_Design,Books,Business,Charity,Entertainment,Family,Fashion,Food_Drink,Health,Music,News,Scien_Tech,Sports) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?) ");
		try {
			double[][] _thetasum = this.newthetasum;
			for (int m = 0; m < documents.length; m++) {
				System.out.println(this.idxDoc.get(m));
				INSERT.setInt(1, Integer.parseInt(this.idxDoc.get(m)));
				double sum = 0.0;
				for (int k = 0; k < K; k++)
					sum += _thetasum[m][k];
				for (int k = 0; k < K; k++) {
					// Normalize to a probability and format to three decimals.
					String score = Sformat.format(_thetasum[m][k] / sum);
					String label = idxLabel.get(k);
					if (label.equals("Art_Design"))
						INSERT.setString(2, score);
					else if (label.equals("Books"))
						INSERT.setString(3, score);
					else if (label.equals("Business"))
						INSERT.setString(4, score);
					else if (label.equals("Charity"))
						INSERT.setString(5, score);
					else if (label.equals("Entertainment"))
						INSERT.setString(6, score);
					else if (label.equals("Family"))
						INSERT.setString(7, score);
					else if (label.equals("Fashion"))
						INSERT.setString(8, score);
					else if (label.equals("Food_Drink"))
						INSERT.setString(9, score);
					else if (label.equals("Health"))
						INSERT.setString(10, score);
					else if (label.equals("Music"))
						INSERT.setString(11, score);
					else if (label.equals("News"))
						INSERT.setString(12, score);
					else if (label.equals("Scien_Tech"))
						INSERT.setString(13, score);
					else if (label.equals("Sports"))
						INSERT.setString(14, score);
				}
				INSERT.execute();
			}
		} finally {
			// The original never closed the PreparedStatement and skipped
			// close_db() entirely when an insert failed.
			try {
				INSERT.close();
			} finally {
				close_db();
			}
		}
	}

	/**
	 * Writes the normalized document-topic distribution as CSV (header row of
	 * labels, one row per document) and returns it. Uses thetasum in LEARNING
	 * mode and newthetasum in INFERENCE mode.
	 *
	 * @param path output CSV path
	 * @return theta[doc][topic], each row normalized to sum to 1
	 * @throws Exception if the file cannot be written
	 */
	public double[][] output_theta(String path) throws Exception {
		double[][] _thetasum = null;
		switch (this.type) {
		case LEARNING:
			_thetasum = this.thetasum;
			break;
		case INFERENCE:
			_thetasum = this.newthetasum;
			break;
		}
		double[][] theta = new double[documents.length][K];
		PrintWriter out = new PrintWriter(new FileOutputStream(path));
		try {
			out.print("ThreadID,");
			for (int i = 0; i < K; i++) {
				out.print(idxLabel.get(i));
				if (i != (K - 1))
					out.print(",");
			}
			out.println();
			for (int m = 0; m < documents.length; m++) {
				out.print(this.idxDoc.get(m) + ",");
				double sum = 0.0;
				for (int k = 0; k < K; k++)
					sum += _thetasum[m][k];
				for (int k = 0; k < K; k++) {
					theta[m][k] = _thetasum[m][k] / sum;
					out.print(theta[m][k]);
					if (k != K - 1)
						out.print(",");
				}
				out.println();
			}
		} finally {
			out.close(); // the original leaked the writer if a row failed
		}
		return theta;
	}

	/**
	 * Writes the vocabulary, one term per line, in term-id order (so the line
	 * number corresponds to the id used in the document matrix).
	 *
	 * @param path output file path
	 * @throws Exception if the file cannot be written
	 */
	public void output_terms(String path) throws Exception {
		PrintWriter out = new PrintWriter(new FileOutputStream(path));
		try {
			for (int w = 0; w < V; w++)
				out.println(this.idxTerm.get(w));
		} finally {
			out.close(); // the original leaked the writer on failure
		}
	}

	/**
	 * Writes, for each topic/label, its NBestWords most probable terms as a
	 * tab-separated line "label\tw1,w2,...", with words ordered from best to
	 * worst.
	 *
	 * @param path output file path
	 * @throws Exception if the file cannot be written
	 */
	public void output_phi(String path) throws Exception {
		double[][] _phisum = this.phisum;
		double[][] phi = new double[K][V];
		PrintWriter out = new PrintWriter(new FileOutputStream(path));
		try {
			for (int k = 0; k < K; k++) {
				double sum = 0.0;
				for (int w = 0; w < V; w++)
					sum += _phisum[k][w];
				for (int w = 0; w < V; w++)
					phi[k][w] = _phisum[k][w] / sum;

				// Min-heap of the current best (phi, termId) pairs; the root
				// is the weakest candidate and is evicted on a better word.
				PriorityQueue<Pair<Double, Integer>> q = new PriorityQueue<Pair<Double, Integer>>();
				for (int w = 0; w < V; w++) {
					if (q.size() < NBestWords)
						q.add(new Pair<Double, Integer>(phi[k][w], w));
					else if (q.peek().getFirst() < phi[k][w]) {
						q.poll();
						q.add(new Pair<Double, Integer>(phi[k][w], w));
					}
				}
				// The heap holds fewer than NBestWords entries when V is
				// small; the original assumed exactly NBestWords and threw
				// a NullPointerException in that case.
				int found = q.size();
				String[] best_words = new String[found];
				for (int i = found - 1; i >= 0; i--)
					best_words[i] = this.idxTerm.get(q.poll().getSecond());
				out.print(this.idxLabel.get(k) + "\t");
				for (int i = 0; i < found; i++) {
					out.print(best_words[i]);
					if (i != found - 1)
						out.print(",");
				}
				out.println();
			}
		} finally {
			out.close(); // the original leaked the writer on failure
		}
	}

	/**
	 * Scores the held-out documents under the mean sampled distributions:
	 * normalizes phi from the learning-phase phisum and theta from the
	 * held-out newthetasum, then sums log P(token | doc) over all tokens.
	 *
	 * Two fixes versus the original: phi was indexed by the token's POSITION
	 * in the document instead of its vocabulary id, and the per-document log
	 * likelihood accumulator started at 1.0 instead of 0.0 (biasing every
	 * document's score by +1). The dead exp(-nom/denom) computation whose
	 * result was never returned has been removed.
	 *
	 * @return the total held-out log-likelihood (higher is better) — NOT the
	 *         exponentiated perplexity, matching the original's contract
	 */
	public double perplexity() {
		double[][] _phisum = this.phisum;
		double[][] phi = new double[K][V];
		for (int k = 0; k < K; k++) {
			double sum = 0.0;
			for (int w = 0; w < V; w++)
				sum += _phisum[k][w];
			for (int w = 0; w < V; w++)
				phi[k][w] = _phisum[k][w] / sum;
		}
		double[][] _thetasum = this.newthetasum;
		double[][] theta = new double[documents.length][K];
		for (int m = 0; m < documents.length; m++) {
			double sum = 0.0;
			for (int k = 0; k < K; k++)
				sum += _thetasum[m][k];
			for (int k = 0; k < K; k++)
				theta[m][k] = _thetasum[m][k] / sum;
		}
		double loglik = 0.0;
		for (int m = 0; m < documents.length; m++) {
			double prob_doc = 0.0; // log P(doc); was wrongly seeded with 1.0
			for (int n = 0; n < documents[m].length; n++) {
				int w = documents[m][n]; // vocabulary id of this token
				double prob_w = 0;
				for (int k = 0; k < this.K; k++)
					prob_w += theta[m][k] * phi[k][w];
				prob_doc += Math.log(prob_w);
			}
			loglik += prob_doc;
		}
		return loglik;
	}

	/**
	 * Command-line entry point driven by Configure.LLDArgs. In learning mode
	 * it trains a model and optionally serializes it and writes theta/phi/term
	 * outputs; in inference mode it loads a serialized model and samples
	 * held-out documents.
	 *
	 * @throws Exception on I/O, parsing, or deserialization failure
	 */
	public static void init() throws Exception {
		String args[] = Configure.LLDArgs;

		CommandLineParser parser = new GnuParser();
		CommandLine line = null;
		try {
			// parse the command line arguments
			line = parser.parse(cmd(), args);
		} catch (Exception exp) {
			System.err.println("Command Line Parsing failed.  Reason: "
					+ exp.getMessage());
			// The original fell through here and dereferenced the null line.
			return;
		}
		if (!line.hasOption("mode") || !line.hasOption("idocs")) {
			System.err.println("Please specify the -mode or -idocs");
			return;
		}
		LabelLDAModel lda = null;
		String docFile = line.getOptionValue("idocs");
		String mode = line.getOptionValue("mode");
		if (mode.equalsIgnoreCase("learning")) {
			lda = new LabelLDAModel(Type.LEARNING);
			set_params(lda, line);
			lda.load_learning_data(docFile, 0, 1, 2);
			lda.learning();
			if (line.hasOption("omodel")) {
				ObjectOutputStream s = new ObjectOutputStream(
						new FileOutputStream(line.getOptionValue("omodel")));
				try {
					s.writeObject(lda);
				} finally {
					// close() flushes; the original only flushed and leaked
					// the stream.
					s.close();
				}
			}
			if (line.hasOption("otheta"))
				lda.output_theta(line.getOptionValue("otheta"));
			if (line.hasOption("ophi"))
				lda.output_phi(line.getOptionValue("ophi"));
			if (line.hasOption("oterm"))
				lda.output_terms(line.getOptionValue("oterm"));

		} else if (mode.equalsIgnoreCase("inference")) {
			// Inference collects a theta sample on every post-burn-in sweep.
			BURN_IN = 200;
			SAMPLE_LAG = 1;
			ObjectInputStream ois = new ObjectInputStream(new FileInputStream(
					line.getOptionValue("imodel")));
			try {
				lda = (LabelLDAModel) ois.readObject();
			} finally {
				ois.close(); // the original never closed the model stream
			}
			set_params(lda, line);
			lda.load_inference_data(docFile, 0, 1, 2);
			lda.inference();

			if (line.hasOption("otheta"))
				lda.output_theta(line.getOptionValue("otheta"));
		}
	}

	/**
	 * Programmatic inference entry point: loads a serialized model, runs
	 * held-out sampling (without perplexity scoring) on the already-parsed
	 * documents, and writes theta to a CSV file. Best-effort: errors after
	 * the model file is opened are logged rather than propagated, matching
	 * the original contract.
	 *
	 * @param docFile already-parsed (docId, labels, terms) rows
	 * @param imodel  path of the serialized LabelLDAModel
	 * @param otheta  output CSV path for the document-topic distribution
	 * @throws FileNotFoundException if the model file does not exist
	 */
	public static void pedworkRun(LinkedList<ArrayList<String[]>> docFile,
			String imodel, String otheta) throws FileNotFoundException {
		// Inference collects a theta sample on every post-burn-in sweep.
		BURN_IN = 200;
		SAMPLE_LAG = 1;
		// Opened outside the try so a missing file still propagates as
		// FileNotFoundException; the original also leaked this stream and
		// printed it as debug output.
		FileInputStream in = new FileInputStream(imodel);
		try {
			ObjectInputStream ois = new ObjectInputStream(in);
			LabelLDAModel lda = (LabelLDAModel) ois.readObject();
			lda.load_inference_data_stream(docFile, 0, 1, 2);
			lda.inference_no_perplexity();
			lda.output_theta(otheta);
		} catch (Exception e) {
			// Best-effort: report and return (collapses the original's
			// three identical catch blocks).
			e.printStackTrace();
		} finally {
			try {
				in.close();
			} catch (IOException ignored) {
				// nothing useful to do if close fails
			}
		}
	}

	/**
	 * Overrides hyper-parameters and the sampler schedule from command-line
	 * options, when present.
	 *
	 * @param lda  model whose alpha/beta are updated
	 * @param line parsed command line
	 */
	public static void set_params(LabelLDAModel lda, CommandLine line) {
		if (line.hasOption("alpha"))
			lda.alpha = Double.parseDouble(line.getOptionValue("alpha"));
		if (line.hasOption("beta"))
			lda.beta = Double.parseDouble(line.getOptionValue("beta"));
		// ITERATIONS/BURN_IN/SAMPLE_LAG are static; the original wrote them
		// through the instance, obscuring that they are shared globals.
		if (line.hasOption("iter"))
			ITERATIONS = Integer.parseInt(line.getOptionValue("iter"));
		if (line.hasOption("burn"))
			BURN_IN = Integer.parseInt(line.getOptionValue("burn"));
		if (line.hasOption("lag"))
			SAMPLE_LAG = Integer.parseInt(line.getOptionValue("lag"));
	}

	/**
	 * Builds the supported command-line options.
	 *
	 * @return the Options set parsed by init()
	 */
	public static Options cmd() {
		Options options = new Options();
		options.addOption("mode", true, "the mode, either learning|inference");
		options.addOption("alpha", true, "the alpha value");
		options.addOption("beta", true, "the beta value");
		options.addOption("iter", true,
				"the number of gibbs sampling iterations");
		options.addOption("burn", true, "the number of burn in iterations");
		options.addOption("lag", true, "the sample lag to compute theta & phi");
		options.addOption("idocs", true,
				"the input documents with label and terms");
		options.addOption("imodel", true,
				"the input path for previously learned model");
		options.addOption("omodel", true,
				"the output path for resulting model");
		options.addOption("otheta", true,
				"the output path for document topic distribution");
		options.addOption("ophi", true,
				"the output path for document topic term distribution");
		options.addOption("oterm", true,
				"the output path for document term list");
		// Fixed copy-pasted help text: this flag concerns perplexity output,
		// not the term list.
		options.addOption("operp", true,
				"the output path for held-out perplexity");
		return options;
	}
}