package edu.ptit.app;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;

import edu.ptit.jad.configuration.CONSTANTS;
import edu.ptit.jad.data.DataAccess;
import edu.ptit.jad.preprocess.app.LdaModel;
import edu.ptit.jad.similarity.CosineSimilarityMeasure;

public class AppAT {

	/**
	 * Encodes a set of test news articles into vocabulary ids, runs LDA
	 * inference on each article with a pre-trained model, and writes the
	 * per-document topic distributions to {@code data/input/news_at.theta}.
	 *
	 * <p>Output format: line 1 = document count, line 2 = topic count, then
	 * one line of space-separated topic probabilities per document.
	 *
	 * @param args unused
	 * @throws Exception if the vocabulary, test data, or model cannot be
	 *                   loaded, or if the output file cannot be written
	 */
	public static void main(String[] args) throws Exception {

		// Word -> integer-id mapping used to encode raw tokens for the model.
		Map<String, Integer> vobs = DataAccess.getInstance().getVocabularies();

		List<String> news = DataAccess.getInstance().getContents(
				"data/test/100_test_news.jad");

		// Load the pre-trained LDA model.
		LdaModel model = new LdaModel();
		model.loadModel("data/input/wp.model");

		File file = new File("data/input/news_at.theta");

		// try-with-resources guarantees the writer is closed (and the buffer
		// flushed) even if inference or a write throws mid-way; the previous
		// version leaked the stream on any exception. StandardCharsets.UTF_8
		// replaces the charset-name string "UTF8", which could fail at
		// runtime with UnsupportedEncodingException.
		try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
				new FileOutputStream(file), StandardCharsets.UTF_8))) {

			out.write(news.size() + "\n");
			out.write(model.getNumTopics() + "\n");

			for (String line : news) {

				StringTokenizer tknr = new StringTokenizer(line, " ");

				// StringBuilder avoids O(n^2) string concatenation in the loop.
				StringBuilder encodeLine = new StringBuilder();

				while (tknr.hasMoreTokens()) {
					// NOTE(review): vobs.get(...) returns null for
					// out-of-vocabulary tokens, which encodes as the literal
					// string "null" (same as the original behavior) — confirm
					// LdaModel.inference tolerates this.
					encodeLine.append(vobs.get(tknr.nextToken())).append(' ');
				}

				// trim()+split(" ") preserves the original edge case: an empty
				// line still yields a single-element array containing "".
				double[] p = model.inference(encodeLine.toString().trim()
						.split(" "));

				for (int i = 0; i < p.length; i++) {
					out.write(p[i] + " ");
				}

				out.write("\n");
			}
		}
	}

}
