package de.connecttext.dao.solr;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;

import de.connecttext.dao.IAcquireDao;
import de.connecttext.model.TagVector;
import de.connecttext.model.TextDocument;
import de.connecttext.services.Stopwatch;

/**
 * Abstract base class of the SolrDao. Implementations of this class can be
 * iterated over, yielding one {@link TextDocument} per indexed document.
 * 
 * @author conrad
 * 
 */
public abstract class SolrDao implements IAcquireDao, Iterable<TextDocument> {

	/**
	 * Pattern that extracts the first highlighted term from a Solr snippet.
	 * Compiled once instead of once per fragment as before. Note: this
	 * intentionally hard-codes the default {@code <em>} markup; it is the
	 * fast path, with a manual search on the configurable markup fields as
	 * fallback (see retrieveOriginalTerm2).
	 */
	private static final Pattern HIGHLIGHT_PATTERN = Pattern
			.compile("(?s).*<em>(.+)</em>.*");

	protected String hostName = "localhost";
	protected String servletName = "solr";
	protected String requestHandlerName = "request";
	protected String dataimportHandlerName = "dataimport";
	protected String highlightingMarkupStart = "<em>";
	protected String highlightingMarkupEnd = "</em>";

	// Name of the field containing all the text which is to be analyzed
	protected String fieldName = "textAnalyzed";
	// Name of the field containing the non-normalized (original) text form
	protected String fieldNameOriginal = "textOriginal";

	// Accumulated profiling times for the three phases of
	// retrieveTagVectors(): HTTP request, JSON parsing, vector construction.
	public long elapsedTime1 = 0;
	public long elapsedTime2 = 0;
	public long elapsedTime3 = 0;

	/**
	 * Total number of documents in the index; -1 means "not retrieved yet".
	 */
	protected long docsInIndex = -1;

	Stopwatch stopwatch = new Stopwatch();

	/**
	 * Returns the document at the given index position.
	 * 
	 * @param row
	 *            zero-based position of the document in the index
	 * @return the document at that position
	 */
	public abstract TextDocument get(int row);

	/**
	 * Retrieves the Solr id of the document at the given index position.
	 * 
	 * @param row
	 *            specifies which document is to be retrieved
	 * @return the Solr id of the document
	 * @throws IOException
	 *             if the Solr request fails or no document exists at that
	 *             position
	 */
	protected String retrieveSolrId(int row) throws IOException {

		String urlString = "select?q=*:*&start=" + row
				+ "&rows=1&fl=id&wt=json";
		String responseString = urlRequest(urlString);
		JSONObject root = (JSONObject) JSONValue.parse(responseString);
		JSONObject response = (JSONObject) root.get("response");
		JSONArray docs = (JSONArray) response.get("docs");
		// Explicit check instead of the former unchecked get(0), which threw
		// an undeclared IndexOutOfBoundsException past the end of the index.
		if (docs == null || docs.isEmpty())
			throw new IOException("No document found at row " + row);
		JSONObject doc = (JSONObject) docs.get(0);
		return (String) doc.get("id");
	}

	/**
	 * Queries Solr for the count of documents in the index and caches the
	 * result in {@link #docsInIndex}.
	 * 
	 * @throws IOException
	 *             if the Solr request fails
	 */
	protected void retrieveDocsInIndex() throws IOException {

		String urlString = "select?q=*:*&fq=&start=0&rows=0&wt=json";
		String responseString = urlRequest(urlString);
		JSONObject root = (JSONObject) JSONValue.parse(responseString);
		JSONObject response = (JSONObject) root.get("response");
		this.docsInIndex = (Long) response.get("numFound");
	}

	/**
	 * Retrieves the term vectors (tf, df, tf-idf) of the document with the
	 * given Solr id via Solr's TermVectorComponent ("tvrh" handler).
	 * 
	 * @param solrId
	 *            Solr id of the document
	 * @return one TagVector per term of the analyzed field
	 * @throws IOException
	 *             if the Solr request fails
	 */
	protected List<TagVector> retrieveTagVectors(String solrId)
			throws IOException {

		// Phase 1: HTTP request.
		stopwatch.start();
		String urlString = "select?q=id:" + solrId + "&fq=&start=0&rows=1&fl="
				+ fieldName
				+ "&wt=json&qt=tvrh&tv.tf=true&tv.df=true&tv.tf_idf=true";
		String responseString = urlRequest(urlString);
		elapsedTime1 += stopwatch.getElapsedTime();
		stopwatch.reset();

		/*
		 * Expected JSON response shape:
		 * 
		 * { "responseHeader":{ "status":0, "QTime":3},
		 *   "response":{"numFound":12,"start":0,"docs":[
		 *       {"id":"/Users/conrad/server/textResources/tsomh10.txt"}]},
		 *   "termVectors":[ "doc-0",[
		 *       "uniqueKey","/Users/conrad/server/textResources/tsomh10.txt",
		 *       "$fieldName",[ "000",[ "tf",3, "df",10, "tf-idf",0.3],
		 *                      "04", [ "tf",1, "df",10, "tf-idf",0.1]],
		 *       "uniqueKeyFieldName","id"]] }
		 */

		// Phase 2: JSON parsing down to the per-field term vector array.
		stopwatch.start();
		JSONObject root = (JSONObject) JSONValue.parse(responseString);
		JSONArray termVectors = (JSONArray) root.get("termVectors");
		JSONArray doc = (JSONArray) termVectors.get(1);
		JSONArray termVectorsForField = (JSONArray) doc.get(3);
		elapsedTime2 += stopwatch.getElapsedTime();
		stopwatch.reset();

		// Phase 3: build the TagVectors. The field array alternates term
		// strings and term-info arrays, so we walk it in steps of two. The
		// "i + 1 < size" guard also protects against a trailing term that
		// lacks its info array (the old hand-rolled i++ inside the body did
		// not).
		stopwatch.start();
		List<TagVector> tagVectors = new ArrayList<TagVector>();
		for (int i = 0; i + 1 < termVectorsForField.size(); i += 2) {

			String term = (String) termVectorsForField.get(i);
			JSONArray termVector = (JSONArray) termVectorsForField.get(i + 1);

			// CAUTION: these indices mirror the exact layout produced by the
			// enabled term-vector components (tf at 1, df at 3, tf-idf at 5).
			// They shift if other tv.* options are toggled.
			Long tf = (Long) termVector.get(1);
			Double tfidf = (Double) termVector.get(5);

			TagVector tagVector = new TagVector();
			tagVector.setLemmatizedTerm(term);
			tagVector.setTf(tf);
			tagVector.setTfidf(tfidf);
			tagVectors.add(tagVector);
		}
		elapsedTime3 += stopwatch.getElapsedTime();
		stopwatch.reset();

		return tagVectors;
	}

	/**
	 * General method for accessing content of a URL via HTTP. The given
	 * relative query string is appended to
	 * {@code http://<hostName>:8983/<servletName>/}.
	 * 
	 * @param urlString
	 *            relative Solr query string, e.g. "select?q=*:*&amp;wt=json"
	 * @return the complete HTTP response body concatenated to one string
	 * @throws IOException
	 *             if the query string is malformed or the request fails
	 */
	protected String urlRequest(String urlString) throws IOException {

		try {
			urlString = new URI(urlString).toString();
		} catch (URISyntaxException e) {
			// BUGFIX: this was formerly swallowed with printStackTrace(),
			// which sent the malformed query string on anyway.
			throw new IOException("Malformed Solr query: " + urlString, e);
		}
		URL url = new URL("http", hostName, 8983, "/" + servletName + "/"
				+ urlString);

		URLConnection urlConnection = url.openConnection();
		// Solr answers JSON in UTF-8; the former platform-default charset
		// would garble non-ASCII terms on some systems.
		BufferedReader in = new BufferedReader(new InputStreamReader(
				urlConnection.getInputStream(), "UTF-8"));
		try {
			// BUGFIX: StringBuilder instead of O(n^2) String.concat in a
			// loop; the reader is now also closed when readLine() throws.
			StringBuilder responseBuilder = new StringBuilder();
			String inputLine;
			while ((inputLine = in.readLine()) != null)
				responseBuilder.append(inputLine);
			return responseBuilder.toString();
		} finally {
			in.close();
		}
	}

	/**
	 * Tries to retrieve the original term of the tag by querying a second
	 * field which contains the non-normalized form of the tag. The name of
	 * the field is specified in the member field {@link #fieldNameOriginal}.
	 * This is a lot faster than highlighting but less accurate.
	 * 
	 * @param id
	 *            Solr id of the document (currently not used by the query)
	 * @param tag
	 *            the normalized tag to look up
	 * @return the first matching original term, or "" if none was found
	 * @throws IOException
	 *             if the Solr request fails
	 */
	public String retrieveOriginalTerm(String id, String tag)
			throws IOException {

		String termString = "";
		if (tag != null) {

			if (tag.contains(" ")) {
				// If the tag is multi-grammed, we do a regex to query the
				// index. Note that this is incredibly slow compared to
				// one-grammed tags.
				StringBuilder regex = new StringBuilder("terms.regex=");
				for (String tempStr : tag.split(" "))
					regex.append(tempStr).append(".*");
				termString = regex.toString();
			} else {
				// If the tag is a single term, we do a prefix query, assuming
				// that the lemmatizer does not alter the prefix.
				termString = "terms.prefix=" + tag;
			}
		}

		String urlString = "terms?terms.fl=" + fieldNameOriginal + "&"
				+ termString + "&wt=json&indent=on&terms=true&terms.limit=1";
		String responseString = urlRequest(urlString);

		JSONObject root = (JSONObject) JSONValue.parse(responseString);
		JSONArray terms = (JSONArray) root.get("terms");

		// The "terms" array alternates field names and term arrays; the term
		// array for our field sits at index 1.
		// BUGFIX: the former "size() > 0" check still allowed get(1) to throw
		// IndexOutOfBoundsException on a one-element array; it must be > 1.
		String toReturn = "";
		if (terms != null && terms.size() > 1) {
			JSONArray fieldTerms = (JSONArray) terms.get(1);
			if (fieldTerms.size() > 0)
				toReturn = (String) fieldTerms.get(0);
		}

		return toReturn;
	}

	/**
	 * Tries to guess the non-normalized form of the given term in the
	 * document by asking Solr for highlighted snippets. Used to enrich the
	 * interface with more than only the lemmatized form of the term, which is
	 * quite unreadable. Falls back to a larger analysis window, and finally
	 * to a query without the id filter, before giving up.
	 * 
	 * @param id
	 *            Solr id of the document
	 * @param tag
	 *            normalized term to look up
	 * @return the most frequently highlighted original form, or null if none
	 *         was found
	 * @throws IOException
	 *             if a Solr request fails
	 */
	public String retrieveOriginalTerm2(String id, String tag)
			throws IOException {

		if (tag != null)
			tag = tag.replace(" ", "%20");

		String baseQuery = "select?q=" + fieldName + ":" + tag + "&fq=id:"
				+ id + "&start=0&rows=1&fl=id&wt=json&hl.fl=" + fieldName
				+ "&hl=true&hl.fragsize=1&hl.snippets=5";

		// 1st attempt: default highlighting window.
		JSONObject highlighting = fetchHighlighting(baseQuery);

		// 2nd attempt: nothing found, so increase maxAnalyzedChars (slower).
		if (!highlighting.containsKey(id))
			highlighting = fetchHighlighting(baseQuery
					+ "&hl.maxAnalyzedChars=1000000");

		// 3rd attempt: if nothing matches and we are risking to get no
		// adequate alternative for the tag, something is strange with the
		// query syntax. Drop the id filter and steal the tag from whatever
		// document matches.
		if (!highlighting.containsKey(id))
			highlighting = fetchHighlighting("select?q=" + fieldName + ":"
					+ tag + "&start=0&rows=1&fl=id&wt=json&hl.fl=" + fieldName
					+ "&hl=true&hl.fragsize=1&hl.snippets=5&hl.maxAnalyzedChars=1000000");

		if (!highlighting.containsKey(id))
			return null;
		JSONObject idObject = (JSONObject) highlighting.get(id);
		if (!idObject.containsKey(fieldName))
			return null;

		// Count every highlighted form we can extract from the snippets.
		Map<String, Integer> guessedTerms = new HashMap<String, Integer>();
		JSONArray fieldNameArray = (JSONArray) idObject.get(fieldName);
		for (int i = 0; i < fieldNameArray.size(); i++) {

			String responseFragment = (String) fieldNameArray.get(i);
			Matcher m = HIGHLIGHT_PATTERN.matcher(responseFragment);
			if (m.matches()) {
				addToBag(guessedTerms, m.group(1));
			} else {
				// Pattern recognition failed (e.g. snippet cut mid-markup);
				// fall back to a manual markup search.
				int start = responseFragment.indexOf(highlightingMarkupStart);
				int end = responseFragment.indexOf(highlightingMarkupEnd);
				// BUGFIX: also require the end markup to be present and
				// behind the start — substring(start, -1) used to throw
				// StringIndexOutOfBoundsException.
				if (start != -1 && end != -1) {
					int from = start + highlightingMarkupStart.length();
					if (end >= from)
						addToBag(guessedTerms,
								responseFragment.substring(from, end));
				}
			}
		}

		// Pick the most frequent guess.
		// BUGFIX: "max" was never updated in the old loop, so effectively the
		// LAST term with a positive count won instead of the most frequent.
		String tagAlt = null;
		int max = 0;
		for (Map.Entry<String, Integer> entry : guessedTerms.entrySet()) {
			if (entry.getValue() > max) {
				max = entry.getValue();
				tagAlt = entry.getKey();
			}
		}
		return tagAlt;
	}

	/**
	 * Runs the given Solr query and returns the "highlighting" section of the
	 * JSON response.
	 * 
	 * @param urlString
	 *            relative Solr query string
	 * @return the highlighting object of the response
	 * @throws IOException
	 *             if the Solr request fails
	 */
	private JSONObject fetchHighlighting(String urlString) throws IOException {
		String responseString = urlRequest(urlString);
		JSONObject root = (JSONObject) JSONValue.parse(responseString);
		return (JSONObject) root.get("highlighting");
	}

	/**
	 * Container method to emulate a bag structure using a map: the given map
	 * counts the occurrences of the given string when called repeatedly.
	 * 
	 * @param bag
	 *            map from string to its occurrence count
	 * @param addMe
	 *            string whose count is to be incremented
	 */
	private void addToBag(Map<String, Integer> bag, String addMe) {

		Integer count = bag.get(addMe);
		bag.put(addMe, count == null ? 1 : count + 1);
	}

	/**
	 * Returns the count of all indexed documents. The value is retrieved
	 * lazily by {@link #retrieveDocsInIndex()} on first use; if that fails,
	 * -1 is returned.
	 */
	public int length() {
		if (docsInIndex == -1L) {
			try {
				retrieveDocsInIndex();
			} catch (IOException e) {
				e.printStackTrace();
			}
		}
		return (int) docsInIndex;
	}

	/**
	 * Iterates over all documents in the index.
	 * 
	 * @return a new document iterator, or null if it could not be created
	 *         (e.g. Solr unreachable) — callers must be prepared for null
	 */
	public Iterator<TextDocument> iterator() {
		try {
			return new SolrDocumentIterator(this);
		} catch (IOException e) {
			e.printStackTrace();
			return null;
		}
	}

}
