package util.delicious;

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.SortedSet;

import util.io.FileInput;

import com.aliasi.lm.TokenizedLM;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.TokenizerFactory;
import com.aliasi.util.Files;
import com.aliasi.util.ScoredObject;

public class AspectMinning {

	// NOTE(review): the class name misspells "Mining"; kept as-is so existing
	// callers continue to compile.
	// NOTE(review): FileInput instances below are never closed — verify
	// whether util.io.FileInput holds an OS handle that needs an explicit
	// close()/dispose().

	/** Column index (0-based) of the first tag field in a tab-separated delicious record. */
	private static final int TAGS_POS = 4;

	/** Returns true when the tag is one of the kid/children spellings of interest. */
	private static boolean isKidsTag(String tag) {
		return tag.equals("kids") || tag.equals("children")
				|| tag.equals("4kids") || tag.equals("4children")
				|| tag.equals("forkids") || tag.equals("forchildren")
				|| tag.equals("for_kids") || tag.equals("for_children");
	}

	/**
	 * Prints every input line whose tag fields (columns {@link #TAGS_POS} and
	 * beyond) contain at least one kid/children-related tag. The output is a
	 * plain text file usable with the SimpleSpace tokenizer of LingPipe.
	 *
	 * @param delicious path of a tab-separated delicious export
	 */
	public static void genKidsDElicious(String delicious) {
		FileInput in = new FileInput(delicious);
		String line = in.readString();
		while (line != null) {
			String[] t = line.split("\t");
			boolean hasKidsTag = false;
			for (int i = TAGS_POS; i < t.length; i++) {
				if (isKidsTag(t[i])) {
					hasKidsTag = true;
					break; // one matching tag is enough to keep the line
				}
			}
			if (hasKidsTag)
				System.out.println(line);
			line = in.readString();
		}
	}

	/**
	 * Prints every ordered pair of distinct tags found on each line (both
	 * orderings), one tab-separated pair per output line, as bigram training
	 * data.
	 *
	 * @param delicious path of a tab-separated delicious export
	 */
	public static void genTrainingBigrams(String delicious) {
		FileInput in = new FileInput(delicious);
		String line = in.readString();
		while (line != null) {
			String[] t = line.split("\t");
			// Tag fields start at TAGS_POS; emit each unordered pair in both orders.
			for (int i = TAGS_POS; i < t.length; i++) {
				for (int j = i + 1; j < t.length; j++) {
					System.out.println(t[i] + "\t" + t[j]);
					System.out.println(t[j] + "\t" + t[i]);
				}
			}
			line = in.readString();
		}
	}

	/**
	 * Prints every ordered triple of distinct tags found on each line (all six
	 * permutations), one tab-separated triple per output line, as trigram
	 * training data.
	 *
	 * @param delicious path of a tab-separated delicious export
	 */
	public static void genTrainingTrigrams(String delicious) {
		FileInput in = new FileInput(delicious);
		String line = in.readString();
		while (line != null) {
			String[] t = line.split("\t");
			for (int i = TAGS_POS; i < t.length; i++) {
				for (int j = i + 1; j < t.length; j++) {
					for (int k = j + 1; k < t.length; k++) {
						// All six permutations of the unordered triple {i, j, k}.
						System.out.println(t[i] + "\t" + t[j] + "\t" + t[k]);
						System.out.println(t[i] + "\t" + t[k] + "\t" + t[j]);
						System.out.println(t[j] + "\t" + t[i] + "\t" + t[k]);
						System.out.println(t[j] + "\t" + t[k] + "\t" + t[i]);
						System.out.println(t[k] + "\t" + t[i] + "\t" + t[j]);
						System.out.println(t[k] + "\t" + t[j] + "\t" + t[i]);
					}
				}
			}
			line = in.readString();
		}
	}

	/**
	 * Trains an n-gram background language model over every file in
	 * {@code background_dir} and prints its collocations in order of
	 * significance.
	 *
	 * @param background_dir directory containing the training text files
	 * @param ngram n-gram order of the language model
	 * @param NGRAM_REPORTING_LENGTH length of the collocations to report
	 * @param MIN_COUNT minimum occurrence count for a reported collocation
	 * @param MAX_COUNT maximum number of collocations returned
	 *        (per LingPipe's collocationSet contract — confirm against the
	 *        library version in use)
	 * @throws IOException if the directory cannot be listed or a file read
	 */
	public void generateModel(String background_dir, int ngram,
			int NGRAM_REPORTING_LENGTH, int MIN_COUNT, int MAX_COUNT)
			throws IOException {

		IndoEuropeanTokenizerFactory tokenizerFactory = new IndoEuropeanTokenizerFactory();

		System.out.println("Training background model");
		TokenizedLM backgroundModel = buildModel(tokenizerFactory, ngram,
				new File(background_dir));

		// Drop rarely-seen sequences (count < 3) to shrink the model.
		backgroundModel.sequenceCounter().prune(3);

		System.out.println("\nAssembling collocations in Training");
		SortedSet<ScoredObject<String[]>> coll = backgroundModel
				.collocationSet(NGRAM_REPORTING_LENGTH, MIN_COUNT, MAX_COUNT);

		System.out.println("\nCollocations in Order of Significance:");
		report(coll);
	}

	/** Prints each scored collocation that passes the capitalization filter. */
	private void report(SortedSet<ScoredObject<String[]>> coll) {
		for (ScoredObject<String[]> s : coll) {
			report_filter(s.score(), s.getObject());
		}
	}

	/**
	 * Prints the score and tokens unless any token fails the capitalized-word
	 * test, in which case the whole collocation is skipped.
	 */
	private static void report_filter(double score, String[] toks) {
		StringBuilder accum = new StringBuilder();
		for (String tok : toks) {
			if (nonCapWord(tok))
				return; // skip collocations containing a non-capitalized word
			accum.append(' ').append(tok);
		}
		System.out.println("Score: " + score + " with :" + accum);
	}

	/**
	 * Returns true unless the token is an upper-case letter followed only by
	 * lower-case letters. Empty tokens are treated as non-cap words (the
	 * unguarded charAt(0) previously threw StringIndexOutOfBoundsException).
	 */
	private static boolean nonCapWord(String tok) {
		if (tok.length() == 0 || !Character.isUpperCase(tok.charAt(0)))
			return true;
		for (int i = 1; i < tok.length(); ++i)
			if (!Character.isLowerCase(tok.charAt(i)))
				return true;
		return false;
	}

	/**
	 * Trains a TokenizedLM of the given n-gram order on every file directly
	 * inside {@code directory}, read as ISO-8859-1 text.
	 *
	 * @throws IOException if the directory cannot be listed or a file read
	 */
	private static TokenizedLM buildModel(TokenizerFactory tokenizerFactory,
			int ngram, File directory) throws IOException {

		String[] trainingFiles = directory.list();
		// File.list() returns null for a non-directory or an I/O error.
		if (trainingFiles == null)
			throw new IOException("Not a readable directory: " + directory);

		TokenizedLM model = new TokenizedLM(tokenizerFactory, ngram);
		System.out.println("Training on " + directory);

		for (String name : trainingFiles) {
			String text = Files.readFromFile(new File(directory, name),
					"ISO-8859-1");
			model.train(text);
		}
		return model;
	}

	/**
	 * Entry point: generates trigram training data from a delicious export.
	 * An optional first argument overrides the default input path.
	 */
	public static void main(String[] args) {
		String path = args.length > 0 ? args[0]
				: "/media/sata_/data/delicious/delicious_kids_final.txt";
		genTrainingTrigrams(path);
	}

}
