
import java.util.Arrays;
import java.util.Locale;
import java.util.Scanner;

import org.cogroo.analyzer.Analyzer;
import org.cogroo.analyzer.ComponentFactory;
import org.cogroo.text.Chunk;
import org.cogroo.text.Document;
import org.cogroo.text.Sentence;
import org.cogroo.text.SyntacticChunk;
import org.cogroo.text.Token;
import org.cogroo.text.impl.DocumentImpl;

/**
 * CoGrOO 4.0.0-SNAPSHOT usage example
 */
public class MainTest {

	/** The CoGrOO analysis pipe instance, created once and reused for every document. */
	private Analyzer cogroo;

	public MainTest() {
		/*
		 * The following command creates a component factory given a locale. The
		 * locale will be resolved as a configuration file in the classpath with
		 * the following pattern: /models_lang_COUNTRY. Another option is to use
		 * the method ComponentFactory.create(InputStream) directly.
		 */
		ComponentFactory factory = ComponentFactory.create(new Locale("pt",
				"BR"));

		/*
		 * Create the default pipe, which is complete, including from sentence
		 * detection to featurization.
		 */
		cogroo = factory.createPipe();
	}

	/**
	 * Creates a document, sets the input text, analyzes it using the pipe and
	 * prints the analysis (with timing information) to the standard output.
	 *
	 * @param documentText the raw text to analyze
	 */
	public void analyzeAndPrintDocument(String documentText) {

		// Create a document and set the text.
		Document document = new DocumentImpl();
		document.setText(documentText);

		// lets measure the time...
		long start = System.nanoTime();

		// analyze it
		cogroo.analyze(document);

		System.out.println("Document processed in "
				+ ((System.nanoTime() - start) / 1000000) + "ms");

		print(document);
	}

	/** A utility method that prints the analyzed document to the std output. */
	private void print(Document document) {
		StringBuilder output = new StringBuilder();

		// and now we navigate the document to print its data
		for (Sentence sentence : document.getSentences()) {

			// Print the sentence. You can also get the sentence span
			// annotation.
			output.append("Sentence: ").append(sentence.getText()).append("\n");

			output.append("  Tokens: \n");

			// one formatted line per token: lexeme, lemmas, POS tag, features
			for (Token token : sentence.getTokens()) {
				String lexeme = token.getLexeme();
				String lemmas = Arrays.toString(token.getLemmas());
				String pos = token.getPOSTag();
				String feat = token.getFeatures();

				// NOTE: a stray debug println of lemmas/features was removed
				// here; it bypassed the report buffer and duplicated data.
				output.append(String.format("    %-10s %-12s %-6s %-10s\n",
						lexeme, lemmas, pos, feat));
			}

			// we can also print the chunks, but printing it is not that easy!
			output.append("  Chunks: ");
			for (Chunk chunk : sentence.getChunks()) {
				output.append("[").append(chunk.getTag()).append(": ");
				for (Token innerToken : chunk.getTokens()) {
					output.append(innerToken.getLexeme()).append(" ");
				}
				output.append("] ");
			}
			output.append("\n");

			// we can also print the shallow parsing results!
			output.append("  Shallow Structure: ");
			for (SyntacticChunk structure : sentence.getSyntacticChunks()) {
				output.append("[").append(structure.getTag()).append(": ");
				for (Token innerToken : structure.getTokens()) {
					output.append(innerToken.getLexeme()).append(" ");
				}
				output.append("] ");
			}
			output.append("\n");
		}

		System.out.println(output.toString());
	}

	/**
	 * Interactive entry point: reads sentences from stdin and analyzes each
	 * one until 'q' is entered or the input stream ends.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {

		MainTest ex = new MainTest();

		// try-with-resources ensures the Scanner (and stdin) is closed on exit
		try (Scanner kb = new Scanner(System.in)) {
			System.out.print("Enter the sentence or 'q' to quit: ");

			// hasNextLine() guards against NoSuchElementException when the
			// input ends (e.g. piped input) without an explicit 'q'
			while (kb.hasNextLine()) {
				String input = kb.nextLine();
				if (input.equals("q")) {
					break;
				}

				ex.analyzeAndPrintDocument(input);

				System.out.print("Enter the sentence or 'q' to quit: ");
			}
		}
	}
}