package telex;

import java.io.*;
import java.util.*;
import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunking;
import com.aliasi.sentences.IndoEuropeanSentenceModel;
import com.aliasi.sentences.MedlineSentenceModel;
import com.aliasi.sentences.SentenceChunker;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.TokenizerFactory;

/**
 * Command-line driver for the LEX named-entity evaluation algorithm
 * (Downey, Broadhead, Etzioni — "Locating Named Entities in Web Text",
 * IJCAI 2007).  Reads a document, splits it into sentences with LingPipe,
 * marks capitalized words as candidate entities, then prunes and merges
 * entities using inverse-query-frequency (IQF) statistics against the
 * tau and delta thresholds.
 */
public class LEXEvaluator {
	
	/**
	 * Evaluates a single document and writes the tagged sentences out.
	 *
	 * @param aPath       path to the input document (blank-line-separated paragraphs)
	 * @param aIQFPath    path to the IQF file containing queries and their counts
	 * @param aCountPath  path to the n-gram database counts file
	 * @param aSCP        true to score merges with SCP (exponent 3.0), false for PMI (1.0)
	 * @param aDelta      sentence-initial pruning threshold, range [0, 1]
	 * @param aTau        merge threshold, range [0, inf)
	 * @param aOutputPath output file path, or null to write to System.out
	 * @return true on success, false on any error (a message is printed first)
	 */
	public boolean evaluate(String aPath, String aIQFPath, String aCountPath, boolean aSCP, double aDelta, double aTau, String aOutputPath) {
		SentenceChunker sd;
		ArrayList<String> sents = new ArrayList<String>();
		Sentence[] doc;
		
		try {
			TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;
			// new MedlineSentenceModel() is the alternative model for biomedical text.
			SentenceModel SENTENCE_MODEL = new IndoEuropeanSentenceModel(true, false);
			sd = new SentenceChunker(TOKENIZER_FACTORY, SENTENCE_MODEL);
		} catch (Exception e) {
			System.out.println("Couldn't create LingPipe sentence detector (" + e + ")");
			return false;
		}
		
		BufferedReader br = null;
		
		try {
			String para = "";
			
			br = new BufferedReader(new FileReader(aPath));
			
			while (true) {
				String buf = br.readLine();
				
				if (buf == null || buf.trim().equals("")) {
					// End of paragraph/file: make sure the last paragraph had some
					// text in it; if not, skip to the next paragraph, or break if
					// we're at the end of the file.
					if (para.length() == 0) {
						if (buf == null) break;
						else continue;
					}
					
					// Dump the accumulated paragraph to the sentence detector.
					Chunking chunking = sd.chunk(para);
					Set<Chunk> set = chunking.chunkSet();
					String[] stp = new String[set.size()];
					int index = 0;
					for (Chunk c : set) stp[index++] = chunking.charSequence().subSequence(c.start(),c.end()).toString();
					sents.addAll(Arrays.asList(stp));
					para = "";
					
					// End of file; break out of the loop.
					if (buf == null) break;
				} else {
					para += buf.trim() + "\n";
				}
			}
		} catch (FileNotFoundException e) {
			System.out.println("Error: file `" + aPath + "' not found!");
			return false;
		} catch (IOException e) {
			System.out.println("Error: couldn't extract queries from file (" + e + ")");
			return false;
		} finally {
			// FIX: the reader was previously closed only on the success path,
			// leaking the file handle whenever an IOException occurred mid-read.
			if (br != null) {
				try { br.close(); } catch (IOException ignored) { /* best-effort close */ }
			}
		}
		
		doc = new Sentence[sents.size()];

		// Mark the words that begin with a capital letter as candidate entities.
		for (int i = 0; i < sents.size(); i++) {
			try {
				doc[i] = LEXComputer.markCapsEntities(new Sentence(sents.get(i)));
			} catch (Exception e) {
				System.out.println("Couldn't tokenize sentence (" + e + ")");
				return false;
			}
		}
		
		// Load saved IQF data and set up a LEXComputer over 5-grams.
		LEXComputer lex = new LEXComputer(5);
		
		try {
			IQFNGramDatabase db = new IQFNGramDatabase(aCountPath); 
			// aIQFPath contains the queries and their respective counts.
			db.addIQF(new IQFReader(aIQFPath));
			
			lex.setDatabase(db);
		} catch (Exception e) {
			System.err.println("Couldn't open IQF file: " + e);
			return false;
		}
		
		// Open the output file if one was requested; otherwise write to stdout.
		PrintStream out = System.out;
		boolean ownsOut = false; // true only if we opened `out` ourselves
		
		if (aOutputPath != null) {
			try {
				out = new PrintStream(aOutputPath);
				ownsOut = true;
			} catch (Exception e) {
				System.err.println("Couldn't open output file: " + e);
				return false;
			}
		} 
		
		for (Sentence s : doc) {
			// Prune sentence-initial words that occur sentence-initially too often.
			double v = lex.computeDelta(s.getWord(0));
			
			if (v > aDelta) {
				s.clearInNamedEntity(0);
			}
			
			// Perform the LEX operation: get the list of all CAPS entities.
			ArrayList<Sentence.EntityRec> ents = new ArrayList<Sentence.EntityRec>(Arrays.asList(s.getNamedEntities()));
			
			// Since asList and the from-Collection constructor don't guarantee
			// order-invariance, make sure ents is sorted by first-word index.
			Collections.sort(ents, new Comparator<Sentence.EntityRec>(){
				public int compare(Sentence.EntityRec aA, Sentence.EntityRec aB) {
					// FIX: Integer.compare avoids the overflow risk of the old
					// subtraction-based comparison.
					return Integer.compare(aA.getStartIndex(), aB.getStartIndex());
				}
			});
			
			while (ents.size() > 1) {
				// Attempt to merge the first pair of entities in ents.
				Sentence.EntityRec a = ents.get(0), b = ents.get(1);
				String[] over_words = s.getWordSpan(a.getEndIndex(), b.getStartIndex());
				
				if (over_words.length > 3) {
					System.out.println("Skipping merge of " + Arrays.toString(a.getWords()) +
							" and " + Arrays.toString(b.getWords()) + " (merge too wide)");
					ents.remove(0);
					continue;
				}
				
				// SCP scoring uses exponent 3.0; PMI uses 1.0.
				v = lex.computeTau(aSCP ? 3.0 : 1.0, a.getWords(), over_words, b.getWords()); 
				
				if (v < 0) {
					System.out.println("Internal error: missing query while attempting to merge " + Arrays.toString(a.getWords()) +
							" and " + Arrays.toString(b.getWords()));
					break;
				} else if (v > aTau) {
					System.out.println("Merging " + Arrays.toString(a.getWords()) + " and " + Arrays.toString(b.getWords()) + 
						" over " + Arrays.toString(over_words) + " (" + v + " > " + aTau + ")");
					
					// Drop the entities being merged and replace them with the
					// single merged entity.
					Sentence.EntityRec new_ent = s.merge(a, b, true);
					
					ents.remove(0); ents.remove(0);
					ents.add(0, new_ent);
				} else {
					System.out.println("Not merging " + Arrays.toString(a.getWords()) + " and " + Arrays.toString(b.getWords()) + 
							" over " + Arrays.toString(over_words) + " (" + v + " <= " + aTau + ")");
					
					// Drop the first entity; it can't merge any farther right.
					ents.remove(0);
				}
			}
			
			out.println(s);
		}
		
		// FIX: only close the stream if we opened it ourselves.  The old code
		// unconditionally closed `out`, which closed System.out when no output
		// path was given and silently swallowed all subsequent program output.
		if (ownsOut) {
			try {
				out.close();
			} catch (Exception e) {
				System.err.println("Warning: failed to close output file.");
			}
		} else {
			out.flush();
		}
		
		return true;
	}
	
	/** Prints the program banner to stdout. */
	static void banner() {
		System.out.print(
			"teLEX Evaluator v. 0.1\n" +
			"Colin Bayer and Douglas Downey <{vogon,ddowney}@cs.washington.edu>\n" +
			"Based on an algorithm in 'Locating Named Entities in Web Text'\n" +
				"\t(Downey, Broadhead, Etzioni 2007, published in IJCAI 2007 Proceedings)\n" +
			"See README for a description of the Evaluator and teLEX in general.\n" +
			"=====================================================================================\n"
		);
	}
	
	/** Prints the banner and a usage summary, then exits with status 255. */
	static void usage() {
		banner();
		System.err.print(
			"Usage: java LEXEvaluator [-f (scp|pmi)] [-nonlp|-nlp path] [-counts path] [-q] tau delta doc iqf [output]\n" +
			"\t-f (scp|pmi): use SCP or PMI to evaluate named entities (default SCP)\n" +
			"\t-nonlp: don't perform NLP on the input file (assume space-delimited tokens, one sentence per line; default)\n" +
			"\t-nlp path: perform NLP on the input file with specified path to OpenNLP Tools distribution (with models subdirectory)\n" +
			"\t-counts path: path to n-gram database counts file (default .)\n" + 
			"\t-q: quiet (suppress all output)\n" +
			"\ttau: tau threshold (range: [0, inf))\n" +
			"\tdelta: delta threshold (range: [0, 1])\n" +
			"\tdoc: path to document file, or directory containing one or more\n" +
				"\t\tdocuments to evaluate\n" +
			"\tiqf: path to IQF file, or directory containing one or more IQF\n" +
				"\t\tfiles to read queries from\n" +
			"\toutput: path to output tagged documents in (default: doc + \".tagged\"\n" +
				"\t\tfor a single file, or doc + \"-t\" for a directory)\n"
		);
		System.exit(255);
	}
	
	/**
	 * Command-line entry point: parses options, resolves input/output paths
	 * (single file or directory), and runs {@link #evaluate} on each document.
	 */
	public static void main(String[] aArgs) {
		if (aArgs.length < 1) {
			// No arguments at all; print usage and exit.
			usage();
		}
		
		String nlp_path = null, doc_path = null, iqf_path = null, output_path = null, counts_path = null;
		double tau = -1, delta = -1;
		boolean use_scp = true, quiet = false, no_nlp = false;
		// FIX: declared OUTSIDE the loop.  It was previously re-initialized to
		// false on every iteration, so the "warn only once" suppression never
		// actually suppressed anything.
		boolean silence_pathname_warning = false;
		
		for (int i = 0; i < aArgs.length; i++) {
			if (aArgs[i].equals("-f")) {
				// -f option: choose the scoring function.
				if (i == aArgs.length - 1 || 
						(!aArgs[i + 1].equalsIgnoreCase("pmi") && 
						 !aArgs[i + 1].equalsIgnoreCase("scp"))) {
					System.err.println("Must specify 'scp' or 'pmi' as argument to -f option.");
					usage();
				} else if (aArgs[i + 1].equalsIgnoreCase("pmi")) {
					use_scp = false;
				} else if (aArgs[i + 1].equalsIgnoreCase("scp")) {
					use_scp = true;
				}
				
				i++;
			} else if (aArgs[i].equals("-nlp")) {
				// FIX: this was `} if (...)` — without the `else`, the argument
				// consumed by -f ("scp"/"pmi") fell through into the pathname
				// branch below and aborted with "Tau must be numeric!".
				if (i == aArgs.length - 1) {
					System.err.println("-nlp option requires an argument.");
					usage();
				}
				
				nlp_path = aArgs[i + 1];
				i++;
			} else if (aArgs[i].equals("-counts")) {
				// -counts option: path to the n-gram counts file.
				if (i == aArgs.length - 1) {
					System.err.println("-counts option requires an argument.");
					usage();
				}
				
				counts_path = aArgs[i + 1];
				i++;
			} else if (aArgs[i].equals("-q")) {
				quiet = true;
			} else if (aArgs[i].equals("-nonlp")) {
				no_nlp = true;
			} else if (aArgs[i].charAt(0) == '-') {
				System.err.println("Unrecognized option '" + aArgs[i] + "'.");
				usage();
			} else {
				// Positional argument: assign to tau, delta, doc, iqf, and output
				// in order; warn (once) about any extras.
				if (tau == -1) try { tau = Double.parseDouble(aArgs[i]); }
					catch (Exception e) {
						System.err.println("Tau must be numeric! (" + aArgs[i] + ")");
						usage();
					}
				else if (delta == -1) try { delta = Double.parseDouble(aArgs[i]); }
					catch (Exception e) {
						System.err.println("Delta must be numeric! (" + aArgs[i] + ")");
						// FIX: exit via usage() like the tau branch does; previously
						// parsing continued and a later pathname argument could be
						// silently consumed as the delta value.
						usage();
					}
				else if (doc_path == null) doc_path = aArgs[i];
				else if (iqf_path == null) iqf_path = aArgs[i];
				else if (output_path == null) output_path = aArgs[i];
				else if (!silence_pathname_warning) {
					System.err.println("Warning: unused pathname argument ignored.");
					silence_pathname_warning = true;
				}
 			}
		}
		
		if (!no_nlp) {
			if (nlp_path == null) nlp_path = ".";	
			OpenNLPProvider.setSentenceDetectorModelPath(nlp_path + "/models/sentdetect/EnglishSD.bin.gz");
			OpenNLPProvider.setTokenizerModelPath(nlp_path + "/models/tokenize/EnglishTok.bin.gz");
		}
		
		// Set up the default counts_path if none was provided.
		if (counts_path == null) counts_path = ".";
		
		// Each entry is {input document path, IQF path, output path}.
		ArrayList<String[]> files_to_process = new ArrayList<String[]>();
		
		if (!quiet) banner();
		
		if (doc_path == null) {
			System.err.println("No input path specified.");
			usage();
		} else if (tau == -1 || delta == -1) {
			System.err.println("Tau and/or delta not specified.");
			usage();
		} else {
			File f = new File(doc_path);
			boolean input_is_dir = false;
			
			// Detect directory-ness of doc_path.	
			if (!f.exists()) {
				System.err.println("Input file does not exist.");
				System.exit(1);
			} else if (f.isDirectory()) {
				if (!quiet) System.out.println("<== Input directory: " + f);
				input_is_dir = true;
			} else if (f.isFile()) {
				if (!quiet) System.out.println("<== Input file: " + f);
				input_is_dir = false;
			} else {
				System.err.println("Input file exists, but is not a normal file or a directory.");
				System.exit(1);
			}
			
			// Generate the output path if none was provided.
			if (output_path == null) {
				if (input_is_dir) output_path = doc_path + "-t";
				else output_path = doc_path + ".tagged";
			}
			
			// Check that the output path either doesn't exist, or is of the right
			// type to overwrite.  Create the output directory if we're writing a
			// directory.
			f = new File(output_path);

			if (!quiet) System.out.println("==> Output " + (input_is_dir ? "directory" : "file") + 
					(f.exists() ? ": " : " (creating): ") + output_path);
			
			if (!f.exists() && input_is_dir) {
				try {
					// FIX: mkdirs() reports failure via its return value rather
					// than an exception, so the old try/catch never fired.
					if (!f.mkdirs()) {
						System.err.println("Error trying to create output directory: " + output_path);
						System.exit(1);
					}
				} catch (Exception e) {
					System.err.println("Error trying to create output directory: " + e);
					System.exit(1);
				}
			} else if (f.isDirectory() && !input_is_dir) {
				System.err.println("Output file already exists, but is a directory; refusing to overwrite it.");
				System.exit(1);
			} else if (f.isFile() && input_is_dir) {
				System.err.println("Output file already exists, but isn't a directory; refusing to overwrite it.");
				System.exit(1);
			}
			
			// Generate the list of input and output filenames to process.
			if (input_is_dir) {
				f = new File(doc_path);
				
				String[] files_in_dir = f.list();
				
				if (files_in_dir == null) {
					System.err.println("Error getting list of files in input directory.  Halting.");
					System.exit(1);
				}
				
				for (String fname : files_in_dir) {
					files_to_process.add(new String[]
					    {doc_path + "/" + fname,
					     iqf_path + "/" + fname,
					     output_path + "/" + fname});
				}
			} else {
				files_to_process.add(new String[]{doc_path, iqf_path, output_path});
			}
		}
		
		LEXEvaluator ev = new LEXEvaluator();
		
		for (String[] fnames : files_to_process) {
			if (!quiet) System.out.println("Evaluating text from " + fnames[0] + " to " + fnames[2] + " (IQF file " + fnames[1] + ")...");
			
			if (!ev.evaluate(fnames[0], fnames[1], counts_path, use_scp, delta, tau, fnames[2])) {
				System.out.println("Evaluation failed.");
				System.exit(1);
			}
		}
		
		if (!quiet) System.out.println("Extraction completed successfully (evaluated " + 
				files_to_process.size() + " files).");
		return;
	}
}