import java.io.File;
import java.io.IOException;
import java.util.Scanner;
import java.util.Set;

import utils.ScoredEntities;

import model.Collection;
import model.Document;
import model.LCAWeightingModel;
import model.Query;

import edu.stanford.nlp.tagger.maxent.MaxentTagger;
import graph.GraphGenerator;

/**
 * Driver for the topic/sentiment summarisation experiments.
 *
 * <p>Runs (or replays, depending on which stages are currently uncommented) a
 * four-stage pipeline for a single topic: slicing documents into fixed-size
 * windows, building a collection, scoring concepts and sentiment, and
 * generating graphs from which summaries are extracted by one of four
 * user-selected algorithms.
 */
public class Experimenter {
	/** Topic id under the data root; changing it redirects every path below. */
	public static int topic = 933;
	public static final String TAGGER_MODEL = "./models/bidirectional-distsim-wsj-0-18.tagger";
	/** Per-topic data root; every pipeline stage reads/writes a subdirectory of it. */
	private static final String TOPIC_BASE = "C:\\Users\\Amos\\workspace\\Data\\selected\\"
			+ topic;
	public static final String COLLECTION_LOCATION = TOPIC_BASE + "\\col";
	public static final String DOC_LOCATION = TOPIC_BASE + "\\doc";
	public static final String PLAIN_TXT = TOPIC_BASE + "\\plain";
	public static final String RESULT_LOCATION = TOPIC_BASE + "\\res";
	public static final String SENTIMENT_LEXICON = "./models/sent_clean-1.txt";
	public static final String QUERY = "Winter Olympics";
	public static final int WINDOW_SIZE = 300;
	public static final int TOP_N_DOCS = 500;
	public static final int TOP_N_CONCEPTS = 50;

	/** Extraction algorithm names, indexed by (menu option - 1). */
	private static final String[] ALGORITHM_NAMES = { "degree", "strength",
			"kcore", "wcut" };

	/**
	 * Carries out the experiment for {@link #topic}: loads the POS tagger,
	 * runs whichever pipeline stages are currently enabled, generates graphs
	 * for the topic, and asks the user which extraction algorithm to use.
	 */
	public static void main(String[] args) {
		MaxentTagger tagger;
		try {
			tagger = new MaxentTagger(TAGGER_MODEL);
			// Result intentionally kept: the commented diagnostics and the
			// disabled stages below rely on the doc listing / count.
			int numOfDocs = Collection.addDirectory(DOC_LOCATION, false).size();
//			System.out.println(numOfDocs + " docs found at " + DOC_LOCATION);
			// Stage 1 - Split the documents into slices
			// for (String docLocation : Collection.addDirectory(DOC_LOCATION,
			// false)) {
			// System.out.println("Processing " + docLocation);
			// Document d = new Document(tagger, new File(docLocation));
			// d.saveWindows(WINDOW_SIZE, COLLECTION_LOCATION);
			// }
			// System.out.println("=== Stage 1 done ===");

			// Stage 2 - Construct the collection
//			Collection c = new Collection(COLLECTION_LOCATION, false, true,
//					false);
//			c.initCollectionParameters(false, false);
//			System.out.println("=== Stage 2 done ===");

			// Stage 3 - Retrieve top N documents and get most frequent concepts

			// from top documents
//			c.rankDocsBM25(new Query(QUERY), false);
//			Set<String> concepts = c.getConcepts(TOP_N_DOCS);
//			System.out.println(concepts.size() + " concepts discovered.");
//
//			System.out.println("=== Stage 3 done ===");
//			LCAWeightingModel model = new LCAWeightingModel(c, QUERY,
//					TOP_N_DOCS);
//			ScoredEntities conceptScores = new ScoredEntities();
//			for (String concept : concepts) {
//				conceptScores.addWord(concept, model.similarity(concept));
//			}
			// for(String term:(new Query(QUERY)).toArray(true)){
			// conceptScores.addWord(term, 1);
			// }
//			conceptScores.printTopN(TOP_N_CONCEPTS);

			// Sentiment lexicon
//			ScoredEntities sentimentWordScores = new ScoredEntities(new File(
//					SENTIMENT_LEXICON));
			System.out.println("=== Stage 3 done ===");

			// Stage 4 - Scoring the documents and save in graph format
//			for (String doc : Collection.addDirectory(DOC_LOCATION, false)) {
//				Document d = new Document(tagger, new File(doc));
//				d.saveLineByLine(doc);
//				d.scoreByLexicon(conceptScores,
//						RESULT_LOCATION + Collection.getSeparator() + "topic",
//						true, false);
//				d.scoreByLexicon(sentimentWordScores, RESULT_LOCATION
//						+ Collection.getSeparator() + "sentiment", true, false);
//			}
			System.out.println("=== Scoring done ===");
			System.out.println("Topic scores are written to the files in "
					+ RESULT_LOCATION + Collection.getSeparator() + "topic");
			System.out
					.println("Sentiment scores are written to the files in "
							+ RESULT_LOCATION + Collection.getSeparator()
							+ "sentiment");
			System.out.println("=== Generating graphs for topic " + topic
					+ "===");
			// Use the RESULT_LOCATION constant instead of re-building the
			// identical path literal inline.
			GraphGenerator.processDirectory(RESULT_LOCATION, 0);
			System.out
					.println("=== Graphs created for topic " + topic + " ===");
			System.out.println("=== Summary Extraction Commencing ===");
			int algorithmSeq = printMenu();
			// printMenu() guarantees 1..4, so this lookup cannot go out of
			// bounds; the table replaces a 4-way switch with identical output.
			System.out.println("Using the " + ALGORITHM_NAMES[algorithmSeq - 1]
					+ "-based extraction algorithm.");
		} catch (IOException e) {
			e.printStackTrace();
		} catch (ClassNotFoundException e) {
			e.printStackTrace();
		}

	}

	/**
	 * Prints the algorithm menu and reads tokens from stdin until the user
	 * enters an integer between 1 and 4 (inclusive). Non-numeric input is
	 * discarded and re-prompted rather than crashing with a
	 * NumberFormatException as the old Integer.parseInt(next()) did.
	 *
	 * @return the chosen option, always in the range 1..4
	 */
	public static int printMenu() {
		System.out.println("1. Create degree-based summaries");
		System.out.println("2. Create strength-based summaries");
		System.out.println("3. Create kcore-based summaries");
		System.out.println("4. Create wcut-based summaries");
		System.out.println("Select an option: ");
		// Deliberately not closed: closing the Scanner would close System.in
		// for the rest of the JVM.
		Scanner in = new Scanner(System.in);
		int option = 0;
		while (option < 1 || option > 4) {
			if (in.hasNextInt()) {
				option = in.nextInt();
				if (option < 1 || option > 4) {
					System.out
							.println("Wrong option. Must choose from the 4 algorithms: ");
				}
			} else {
				in.next(); // discard the non-numeric token
				System.out
						.println("Wrong option. Must choose from the 4 algorithms: ");
			}
		}
		return option;
	}
}
