package DocClass;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.io.*;

/**
 * One round of relevance-feedback query expansion: parses a search-result
 * payload, collects per-result yes/no relevance judgments from the user on
 * stdin, and derives an expanded query from the judged documents' term
 * weights.  Not thread-safe; one instance per query session.
 */
public class QueryProcessor {
	// Result set for the current round; filled by runProcess().
	public ResultSet re;
	// Parser that converts the raw answer string into a ResultSet.
	public ResultParser r = new ResultParser();
	// The query exactly as the user typed it.
	public String askedQuery;
	// Working list of query terms; feedback rounds append/reorder terms here.
	public ArrayList<String> newQuery = new ArrayList<String>();
	// Precision of the last round: relevant count / 10 (see runProcess).
	public double precision = 0;


	// Common words that query expansion must never add to the query.
	public static ArrayList<String> stopWords=new ArrayList<String>();

	static
	{
		stopWords.add("a");
		stopWords.add("for");
		stopWords.add("an");
		stopWords.add("the");
		stopWords.add("be");
		stopWords.add("by");
		stopWords.add("com");
		stopWords.add("de");
		stopWords.add("at");
		stopWords.add("as");
		stopWords.add("from");
		stopWords.add("how");
		stopWords.add("is");
		stopWords.add("it");
		stopWords.add("of");
		stopWords.add("on");
		stopWords.add("or");
		stopWords.add("that");
		stopWords.add("to");
		stopWords.add("are");
		stopWords.add("this");
		stopWords.add("was");
		stopWords.add("who");
		stopWords.add("and");
		stopWords.add("wikipedia");  // domain-specific: appears in many result titles/abstracts

	}

	public QueryProcessor(String inputQuery) {
		askedQuery = inputQuery;
		String tmp[] = askedQuery.split(" +");
		for (int i = 0; i < tmp.length; i++) {
			newQuery.add(tmp[i]);
		}
	}

	/**
	 * Runs one feedback round: parses {@code answer} into a result set, asks
	 * the user to judge each result's relevance on stdin, builds weighted term
	 * statistics for the relevant vs. irrelevant documents, and expands the
	 * query with up to two new high-significance terms.
	 *
	 * @param answer raw search-result payload handed to the ResultParser
	 * @param round  1-based feedback round number (used only by the
	 *               commented-out transcript call)
	 * @return the expanded query, or the original query when no result was
	 *         judged relevant
	 * @throws RuntimeException on stdin I/O failure or EOF while judging
	 */
	public String runProcess(String answer,int round){
		re = r.addResult(answer);//you need to call parser for adding results to ResultSet and that will return you a filled ResultSet object
		int node=re.getNodeLen();

		DocumentIndex allIndexes[] = new DocumentIndex[node];
		BufferedReader stdin = new BufferedReader(new InputStreamReader(System.in));
		for(int k=0;k<node;k++)
		{
			allIndexes[k] = new DocumentIndex(re.doc[k]);
			System.out.println(
					"Result " + (k + 1) + ": " + re.doc[k].URL
					+ "\n\tTitle: " + re.doc[k].Title
					+ "\n\tAbstract: " + re.doc[k].Abstract);
			// Re-prompt until the answer starts with y/Y or n/N.
			do {
				String resp ;
				try {
					System.out.print("Is this relevant? (y/n) ");
					resp = stdin.readLine();
				} catch (IOException e) {
					throw new RuntimeException(e);
				}
				if (null == resp) {
					// EOF on stdin: judging cannot finish, so abort the round.
					System.out.println("Exiting.");
					throw new RuntimeException("unexpected EOF on input");
				} else if (resp.matches("^\\s*[Yy].*")) {
					re.doc[k].relevant = true;
					break;
				} else if ( resp.matches("^\\s*[Nn].*")) {
					re.doc[k].relevant = false;
					break;
				}
			} while (true);
			//user input and update doc class


		}
		//create index
		// Document frequencies over all judged results, then inverted (idf).
		DocumentIndex df = DocumentIndex.getDocumentFrequencies(allIndexes);
//		System.out.println("Total terms: " + df.h.size());
		df.dfInvert();
		int relevant = 0, irrelevant = 0;
		DocumentIndex relsum = new DocumentIndex();
		DocumentIndex irrelsum = new DocumentIndex();

		// sum up the relevant and irrelevant documents.
		// right now, the arraylists are only being used to generate counts--integers
		// would be cheaper.
		for (int i = 0; i < re.doc.length; i++) {
			if (re.doc[i].relevant) {
				++relevant;
				relsum.add(allIndexes[i]);
			}
			else {
				++irrelevant;
				irrelsum.add(allIndexes[i]);
			}
			allIndexes[i].idfMultiply(df);
		}
//		System.out.println("Found " + relevant + " relevant results.");

		// Just doing a subtraction, weighted by ratio between irrelevant
		// and relevant results.

		// NOTE(review): assumes exactly 10 results per round; if fewer come
		// back this understates precision -- confirm intended.
		precision = (double) relevant / 10;

		//createTranscript(askedQuery,round);
		//System.out.println("Weighting factor is " +subWeight);
//		System.out.println("Current precision is " +precision);

		// Nothing relevant: no signal to expand with, return the query as-is.
		if (0 == precision) return this.askedQuery;
		// Average each side, then take a weighted difference of the centroids
		// (Rocchio-style: relevant minus BETA_PARAM * irrelevant).
		relsum.scale(1.0 / (double) relevant);
		irrelsum.scale(1.0 / (double) irrelevant);
		DocumentIndex diffIndex = DocumentIndex.difference(relsum, irrelsum, ProcessConst.BETA_PARAM);
	//	System.out.println("Worst 10:");
	//	diffIndex.dumpList(diffIndex.worstN(diffIndex.h, 10, new DocumentIndex(), 0));
		double sigcutoff = ProcessConst.SIGCUTOFFPARAM;
		int found = 0;
		DocumentIndex newExclude = null;
//		System.out.println("Top 10 stemmed words:");
//		diffIndex.dumpStemmedList(diffIndex.bestNStemmed(10, new DocumentIndex(), 1));
		// Try to add two new terms: digrams first, then stems, then plain
		// unigrams, relaxing the significance cutoff on each pass.
		while (2 > found ) {
			newExclude = new DocumentIndex( newQueryString() );
			found += this.processDigrams(diffIndex, newExclude, sigcutoff, 2 - found);
			newExclude.addAll(stopWords);
//			System.out.println("'found' is currently equal to " + found);
			if (2 <= found) { break; }
			else if (0 < found) { newExclude.addAll(newQuery); }
			int newfound = found + this.processUniStems(diffIndex, newExclude, sigcutoff, 2 - found);
			if (2 <= newfound) {
				break;
			} else if (found < newfound) {
				newExclude.addAll(newQuery);
				found = newfound;
			}
//			System.out.println("'found' is currently equal to " + found);
			found += this.processUnigrams(diffIndex, newExclude, sigcutoff, 2 - found);
			sigcutoff *= ProcessConst.SIGCUTOFFLOWERBY;
//			System.out.println("'found' is currently equal to " + found);
			// NOTE(review): if 'found' stays 0 this loop never exits -- the
			// break below also requires found != 0.  Verify termination.
			if ( sigcutoff <= (ProcessConst.SIGCUTOFFLIMIT) && 0 != found ) break;
		}
		// Final pass with cutoff 0 / maxCount 0: reorders existing query terms
		// using digram evidence without adding any new ones.
		this.processDigrams(diffIndex, newExclude, 0, 0);

//		System.out.println("Added " + found + " terms to query.");
		return newQueryString();
	}

	/**
	 * Joins the current expanded query terms into a single space-separated
	 * string.
	 *
	 * @return the space-joined expanded query, or the original query when no
	 *         terms are present
	 */
	protected String newQueryString() {
		if (newQuery.isEmpty()) {
			return this.askedQuery;
		}
		return String.join(" ", newQuery);
	}

	/**
	 * Adds up to {@code maxCount} of the highest-weighted single terms from
	 * {@code docIdx} to the query, skipping anything in {@code exclude} and
	 * stopping as soon as a term's weight drops below {@code cutoff}.
	 *
	 * @param docIdx   index holding candidate terms and their weights
	 * @param exclude  terms that must not be added
	 * @param cutoff   minimum significance a term needs to be added
	 * @param maxCount maximum number of terms to add
	 * @return the number of terms actually appended to {@code newQuery}
	 */
	protected int processUnigrams(DocumentIndex docIdx, DocumentIndex exclude, double cutoff, int maxCount) {
		String[] candidates = docIdx.bestN(10, exclude, 1);
		int added = 0;
		for (String term : candidates) {
			// A null entry marks the end of the candidate list.
			if (term == null || added == maxCount) {
				break;
			}
			// Candidates arrive best-first, so the first one below the
			// significance threshold ends the scan.
			double weight = docIdx.h.get(term);
			if (weight < cutoff) {
				break;
			}
			newQuery.add(term);
			added++;
		}
		return added;
	}
	/**
	 * Uses two-word phrases (digrams) from {@code docIdx} to improve the
	 * query: when a significant digram pairs an existing query term with a new
	 * word, the new word may be inserted next to it (up to {@code maxCount}
	 * additions); when both words are already in the query, they may be
	 * reordered to sit adjacently.  With {@code maxCount == 0} the method only
	 * reorders.
	 *
	 * @param docIdx   index of candidate digrams and their weights
	 * @param exclude  terms that must not be introduced
	 * @param cutoff   minimum significance for a digram (and for a new term)
	 * @param maxCount maximum number of new terms to add
	 * @return the number of new terms added to {@code newQuery}
	 */
	protected int processDigrams(DocumentIndex docIdx, DocumentIndex exclude, double cutoff, int maxCount) {
		String queryTerms[] = newQuery.toArray(new String[0]);
		String diGrams[] = docIdx.bestN(10, exclude, 2);
//		System.out.println("Best 10 digrams:");
//		docIdx.dumpList(diGrams);
		int found = 0;
		for (int i = 0; i < diGrams.length; i++) {
			// A null entry marks the end of the candidate list.
			if (null == diGrams[i]) {
//				System.out.println("Max count or EOL (at " + i +")");
				break;
			}
			String current = diGrams[i];
			// Candidates are best-first: once one digram falls below the
			// cutoff, none of the rest can qualify.
			double curValue = docIdx.get(current);
			if (cutoff > curValue) {
//				System.out.println("Cutoff reached (at " + i +")");
				break;
			}

			// Find a query term contained in this digram.  Note the
			// unconditional break at the bottom: only the FIRST matching
			// query term is considered for each digram.
			for (int j = 0; j < queryTerms.length; j++) {
				String oldTerm = queryTerms[j];
				int idx = current.indexOf(oldTerm);
				if ( 0 > idx) continue;
				else {
					// Re-resolve the term's position: earlier iterations may
					// have inserted/moved terms since queryTerms was snapshotted.
					int mappedJ = newQuery.indexOf(oldTerm);
					String words[] = current.split(" ");

					// The digram's other word, and which side of the query
					// term it belongs on.
					String newTerm;
					boolean insertBefore;
					if ( 0 == idx ) {
						// query term at beginning of digram
						insertBefore = false;
						newTerm = words[1];
					} else {
						insertBefore = true;
						newTerm =  words[0];
					}
					int curIdx = newQuery.indexOf(newTerm);
					if ( 0 <= curIdx ) {  // both terms are already in the query
						boolean swapOK = true;
						/* possible cases:
						 * 		query A B C, digram A C => 'A C B' OR 'B A C'
						 * 			but what if sig(A C) < sig(A B) ?
						 * 		query A B C digram C A => C A B
						 * 			ditto BC vs. CA
						 * 		query A B, digram B A => B A
						 * 		query A B C, digram B A => B A C
						 * 			except... BC vs BA?
						 */

						// Only move newTerm next to oldTerm if doing so does
						// not break an adjacency that scores at least as well.
						if (this.getDigramScore(curIdx, docIdx, false) >= curValue
							||
							this.getDigramScore(curIdx, docIdx, true) >= curValue
							||
							this.getDigramScore(mappedJ, docIdx, insertBefore) >= curValue) {
							swapOK = false;
						}
						if (swapOK) {
							newQuery.remove(newTerm);
							// Removal may have shifted oldTerm; re-resolve.
							mappedJ = newQuery.indexOf(oldTerm);
							newQuery.add(insertBefore ? mappedJ : mappedJ + 1, newTerm);
						}
					} else if (maxCount > found) { // if only one is present, we may not be able to add the other
						double termScore = docIdx.get(newTerm);
						double compScore = this.getDigramScore(mappedJ, docIdx, insertBefore);
						// we don't add a low-relevance term, even if it's in a significant digram
						if (compScore < curValue && termScore > cutoff) {
							found++;
							int insertionIdx = insertBefore ? mappedJ : mappedJ + 1;
							newQuery.add(insertionIdx, newTerm);
						}
					}

				}
				break;
			}
		}
		return found;
	}

	/**
	 * Adds up to {@code maxCount} terms chosen by stemmed-term weight: the
	 * best stems in {@code docIdx} are mapped back to surface words via
	 * {@code stemReverse} before being appended to the query.
	 *
	 * @param docIdx   index holding stemmed terms and their weights
	 * @param exclude  terms that must not be added
	 * @param cutoff   minimum significance a stem needs to be added
	 * @param maxCount maximum number of terms to add
	 * @return the number of terms actually appended to {@code newQuery}
	 */
	protected int processUniStems(DocumentIndex docIdx, DocumentIndex exclude, double cutoff, int maxCount) {
		String[] candidates = docIdx.bestNStemmed(10, exclude, 1);
		int added = 0;
		for (String stem : candidates) {
			// A null entry marks the end of the candidate list.
			if (stem == null || added == maxCount) {
				break;
			}
			// Candidates arrive best-first; stop at the first one below the
			// significance threshold.
			double weight = docIdx.stems.get(stem);
			if (weight < cutoff) {
				break;
			}
			// Add the unstemmed surface form, not the stem itself.
			newQuery.add(docIdx.stemReverse.get(stem));
			added++;
		}
		return added;
	}

	/**
	 * Looks up the significance score of the digram formed by two adjacent
	 * query terms.
	 *
	 * @param baseIdx index of the anchor term in {@code newQuery}
	 * @param docIdx  index to look the digram's score up in
	 * @param reverse when true, pair the anchor with its predecessor;
	 *                otherwise with its successor
	 * @return the digram's score, or 0.0 when the pair falls outside the query
	 */
	private double getDigramScore(int baseIdx, DocumentIndex docIdx, boolean reverse) {
		int first = reverse ? baseIdx - 1 : baseIdx;
		// Guard: both positions of the pair must exist in the current query.
		if (first < 0 || first + 1 >= newQuery.size()) {
			return 0.0;
		}
		String digram = newQuery.get(first) + " " + newQuery.get(first + 1);
		return docIdx.get(digram);
	}

	/**
	 * Appends a transcript of the current feedback round to "transcript.txt":
	 * the query, each result with its relevance judgment, and the round's
	 * precision.  Round 1 is preceded by a session separator.  Failures are
	 * reported to stderr but never propagated (best-effort logging).
	 *
	 * @param ask   the query string used for this round
	 * @param round the 1-based round number
	 */
	public void createTranscript(String ask,int round){

		int node=re.getNodeLen();

		// try-with-resources guarantees the streams are flushed and closed
		// even when a write throws; the original leaked them on that path.
		// (The original also built a DocumentIndex per result into an array
		// that was never read -- that dead work is dropped here.)
		try (FileOutputStream out = new FileOutputStream("transcript.txt", true);
				PrintStream p = new PrintStream(out)) {

			if(round==1)
			{
				// Separator marking the start of a new session.
				p.println("=================");
				p.println("=================");
				p.println("");
			}
			p.println("ROUND " +round);
			p.println("QUERY " +ask);

			for(int k=0;k<node;k++)
			{
				String relev=re.doc[k].relevant?"YES":"NO";
				p.println(
						"\nResult: " + (k + 1)
						+ "\n\tRelevant: " +relev
						+ "\n\tURL: "       +re.doc[k].URL
						+ "\n\tTitle: " + re.doc[k].Title
						+ "\n\tAbstract: " + re.doc[k].Abstract);
			}

			p.println("\nPRECISION: " +precision);
			p.println("============");
			p.println("");
		}
		catch (Exception e)
		{
			// Best-effort logging: report the cause instead of swallowing it.
			System.err.println("Error writing to file: " + e);
		}

	}
}
