/*
	Copyright (c) 2010 Canadensys
*/
package net.canadensys.dataportal.vascan.utils;

import java.io.File;
import java.io.IOException;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.ResourceBundle;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import net.canadensys.dataportal.utils.HibernateUtil;
import net.canadensys.dataportal.vascan.Taxon;
import net.canadensys.dataportal.vascan.config.ApplicationConfig;
import net.canadensys.dataportal.vascan.config.impl.InstallationConfig;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.QueryParser.Operator;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocCollector;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
import org.hibernate.CacheMode;
import org.hibernate.FlushMode;
import org.hibernate.Hibernate;
import org.hibernate.HibernateException;
import org.hibernate.search.FullTextSession;
import org.hibernate.search.Search;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;

/**
 * This class handles all FullTextSearch functionalities
 * TODO :	-clean up warnings
 * 			-upgrade Lucene
 */
public final class Fulltext {
	
	private static final int TAXON_ROW_BUFFER_SIZE = 1000;
	
	/**
	 * Builds the Hibernate Search (Lucene) index for all Taxon records.
	 * The Lucene directory is opened/committed/closed first so that the base
	 * index structure exists even when the indexes folder was restored from a
	 * backup, then every Taxon row is indexed in batches of
	 * TAXON_ROW_BUFFER_SIZE to keep memory usage bounded.
	 * @throws IOException if the Lucene directory cannot be accessed
	 * @throws CorruptIndexException if the existing index is corrupted
	 * @throws LockObtainFailedException if the index write lock is held by another process
	 * @throws HibernateException on database access errors
	 */
	public static final void buildTaxonIndex()
		throws IOException, CorruptIndexException, LockObtainFailedException, HibernateException{
		
		//the trick here is to make sure the base structure of Lucene is there
		//because we backup the indexes folder and we want to regenerate those indexes
		IndexWriter indexWriter = new IndexWriter(FSDirectory.getDirectory(InstallationConfig.getInstance().get(InstallationConfig.LUCENE_DIR_KEY) + ApplicationConfig.LUCENE_INDEX),new StandardAnalyzer(),IndexWriter.MaxFieldLength.LIMITED);
		indexWriter.commit();
		indexWriter.close();
		
		//http://docs.jboss.org/hibernate/search/3.2/reference/en/html/manual-index-changes.html
		BigInteger taxonIndexed = BigInteger.ZERO;
		org.hibernate.Session hibernateSession = HibernateUtil.getSessionFactory().getCurrentSession();
		FullTextSession fullTextSession = Search.getFullTextSession(hibernateSession);
		fullTextSession.beginTransaction();
		
		//manual flush + no cache interaction : recommended settings for mass indexing
		fullTextSession.setFlushMode(FlushMode.MANUAL);
		fullTextSession.setCacheMode(CacheMode.IGNORE);
		
		BigInteger numberOfRecord = (BigInteger)hibernateSession.createSQLQuery("SELECT count(id) FROM taxon LIMIT 1;").uniqueResult();
		System.out.println(numberOfRecord + " taxons to index");
		
		while(!taxonIndexed.equals(numberOfRecord)){
			List<Taxon> taxons = fetchTaxonData(hibernateSession,taxonIndexed.intValue(),TAXON_ROW_BUFFER_SIZE);
			if(taxons.isEmpty()){
				//defensive guard : if the table changed between the count and the paged
				//fetches, the loop could otherwise spin forever without ever reaching
				//numberOfRecord
				System.out.println("No more taxon rows at offset " + taxonIndexed + ", stopping early");
				break;
			}
			for(Taxon currTaxon : taxons){
				fullTextSession.index(currTaxon);
				taxonIndexed = taxonIndexed.add(BigInteger.ONE);
			}
			System.out.println(taxonIndexed + " taxons indexed");
			//push the pending index work and free the session before the next batch
			fullTextSession.flushToIndexes();
			fullTextSession.clear();
		}
		fullTextSession.getTransaction().commit();
		System.out.println("Indexes commited to DB");
	}
	
	/**
	 * Returns one page of Taxon rows so the whole table never has to be
	 * loaded at once (avoids OutOfMemoryException on large tables).
	 * @param hibernateSession active Hibernate session used to run the query
	 * @param from zero-based offset of the first row to return
	 * @param bufferSize maximum number of rows to return
	 * @return at most bufferSize Taxon entities starting at row from
	 */
	@SuppressWarnings("unchecked")
	private static List<Taxon> fetchTaxonData(org.hibernate.Session hibernateSession, int from, int bufferSize){
		org.hibernate.Query pagedTaxonQuery = hibernateSession.createQuery("FROM Taxon");
		pagedTaxonQuery.setFirstResult(from);
		pagedTaxonQuery.setMaxResults(bufferSize);
		List<Taxon> page = (List<Taxon>) pagedTaxonQuery.list();
		return page;
	}
	
	/**
	 * Builds the "namebag" Lucene index : a single sorted, deduplicated
	 * (case and space insensitive, see CaseInsensitiveComparator2) list of all
	 * calculated scientific names and vernacular names.
	 * @return true when the index was written
	 * @throws IOException if the Lucene directory cannot be accessed
	 * @throws CorruptIndexException if the existing index is corrupted
	 * @throws LockObtainFailedException if the index write lock is held by another process
	 */
	@SuppressWarnings("unchecked")
	public static boolean buildNameIndex() 
		throws IOException, CorruptIndexException, LockObtainFailedException {
		
		org.hibernate.Session hibernateSession = HibernateUtil.getSessionFactory().getCurrentSession();
		org.hibernate.Query query;
		hibernateSession.beginTransaction();
		//typed TreeSet (was a raw type) : duplicates differing only by case/spaces are dropped
		Set<String> names = new TreeSet<String>(new CaseInsensitiveComparator2());
		query = hibernateSession.createSQLQuery("SELECT calname FROM lookup ORDER BY calname ASC").addScalar("calname",Hibernate.STRING);
		names.addAll(query.list());
		query = hibernateSession.createSQLQuery("SELECT name FROM vernacularname ORDER BY name ASC").addScalar("name",Hibernate.STRING);
		names.addAll(query.list());

		IndexWriter indexWriter = new IndexWriter(FSDirectory.getDirectory(InstallationConfig.getInstance().get(InstallationConfig.LUCENE_DIR_KEY) + ApplicationConfig.LUCENE_NAMEBAG),new StandardAnalyzer(),IndexWriter.MaxFieldLength.LIMITED);
		try{
			for (String name : names) {
				Document doc = new Document();
				doc.add(new Field("name",name,Field.Store.YES,Field.Index.ANALYZED));
				indexWriter.addDocument(doc);
			}
		}
		finally{
			//always release the Lucene write lock, even if addDocument fails
			//(the previous version leaked the writer on exception)
			indexWriter.close();
		}
		hibernateSession.getTransaction().commit();
		return true;
	}
	
	/**
	 * Prefix search on the calculated name (lookup.calname) using SQL LIKE.
	 * The user supplied search term is bound as a named parameter : the
	 * previous string concatenation was vulnerable to SQL injection.
	 * @param searchQuery prefix to search for
	 * @param limit maximum number of rows, as a string (must parse as an int,
	 *        otherwise NumberFormatException propagates as before)
	 * @param locale bundle used to translate the "rank_*" label keys
	 * @return JSON object of the form {"d":[{"i":taxonid,"n":name,"r":rank},...]}
	 */
	@SuppressWarnings("unchecked")
	public static JSONObject searchLike(String searchQuery,String limit, ResourceBundle locale){
		JSONObject results = new JSONObject();
		JSONArray array = new JSONArray();
		Integer intLimit = Integer.valueOf(limit);
		org.hibernate.Session hibernateSession = HibernateUtil.getSessionFactory().getCurrentSession();
		org.hibernate.Query query = hibernateSession.createSQLQuery("SELECT taxonid, calname, rank FROM lookup WHERE calname LIKE :search LIMIT :limit")
		.addScalar("taxonid",Hibernate.INTEGER)
		.addScalar("calname",Hibernate.STRING)
		.addScalar("rank",Hibernate.STRING);
		//bind the term (with the trailing % wildcard) instead of concatenating it
		query.setParameter("search",searchQuery + "%");
		query.setParameter("limit",intLimit);
		List<Object[]> names = query.list();
		try {
			for(Object[] name : names){
				//build and add data to JSONarray
				JSONObject json = new JSONObject();
				json.put("i",String.valueOf((Integer)name[0]));
				json.put("n", ((String)name[1]).toLowerCase());
				json.put("r", locale.getString("rank_" + ((String)name[2]).toLowerCase()));
				array.put(json);
			}
			results.put("d", array);
		} catch (JSONException e) {
			e.printStackTrace();
		}
		return results;
	}
	
	/**
	 * Searches the namebag index (built by buildNameIndex) for names matching
	 * the query ; a trailing wildcard is appended so partial words match
	 * ("carex al" matches "Carex albicans").
	 * @param searchQuery user supplied search string
	 * @return JSON object of the form {"d":[{"n":name},...]}
	 */
	public static JSONObject searchNameBag(String searchQuery){
		JSONObject results = new JSONObject();
		JSONArray array = new JSONArray();
		// Index tools
		String[] fields = {"name"};
		IndexReader reader = null;
		IndexSearcher searcher = null;
		StandardAnalyzer analyzer = new StandardAnalyzer();
		Query query = null;
		try {
			// load index
			reader = IndexReader.open(InstallationConfig.getInstance().get(InstallationConfig.LUCENE_DIR_KEY) + ApplicationConfig.LUCENE_NAMEBAG);
			searcher = new IndexSearcher(reader);
			
			// parse query
			// add a wildcard character at the end of the search string to match
			// partial words : "carex al*" will match "Carex albicans"
			MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, analyzer);
			parser.setDefaultOperator(Operator.AND);
			query = parser.parse(searchQuery.replace("-"," ") + "*");
			//the wildcard expansion may produce more clauses than Lucene's default cap
			BooleanQuery.setMaxClauseCount(searcher.maxDoc()); 
			
			// collector sorted on the name field, capped at LUCENE_MAX_RESULTS
			TopFieldDocCollector collector = new TopFieldDocCollector(reader, new Sort(fields), ApplicationConfig.LUCENE_MAX_RESULTS);
			
			// exec query
			searcher.search(query,collector);

			// get all top scoring documents
			ScoreDoc scoreDocs[] =  collector.topDocs().scoreDocs;
			
			//loop over all documents and build json array
			for(ScoreDoc scoreDoc : scoreDocs){
				Document document = searcher.doc(scoreDoc.doc);
				JSONObject json = new JSONObject();
				json.put("n", document.getField("name").stringValue());
				array.put(json);
			}
			results.put("d",array);
		} catch (JSONException e) {
			e.printStackTrace();
		}catch(Exception e){ 
			e.printStackTrace();
		}
		finally{
			//the previous version never closed these : file handles leaked on every call
			if(searcher != null){
				try{ searcher.close(); } catch(IOException ioEx){ ioEx.printStackTrace(); }
			}
			if(reader != null){
				try{ reader.close(); } catch(IOException ioEx){ ioEx.printStackTrace(); }
			}
		}
		return results;
	}
	
	
	/**
	 * This function will return a jsonArray of json objects 
	 * containing at a minimum a full scientific name, the status of that name
	 * (accepted or synonym) and its html encoded version (<em> & <strong>). 
	 * Vernacular names, french and english, will be appended to their full 
	 * scientific name parent, or a new entry will be created (duplicate full 
	 * scientific name) ; the information added consists of vernacular name 
	 * (french or english), vernacular status and its html encoded version.
	 * 
	 * @param searchQuery user supplied search string
	 * @param array accumulator the results are appended to (also returned)
	 * @param discriminator 'f' = french vernacular name search, 'e' = english
	 *        vernacular name search, anything else = scientific name search
	 * @param highestAcceptableTaxonRank NOTE(review): not used anywhere in
	 *        this method body - confirm whether filtering by rank was intended
	 * @return	return a jsonArray of names	
	 * 	
	 */
	private static ArrayList<JSONObject> searchCore(String searchQuery, ArrayList<JSONObject> array, char discriminator, int highestAcceptableTaxonRank){
		// Index tools
		String[] fields;
		IndexReader reader = null;
		IndexSearcher searcher = null;
		StandardAnalyzer analyzer = new StandardAnalyzer();
		Query query = null;
		// french or english vernacular name
		if(discriminator == 'f' || discriminator == 'e'){
			// NOTE(review): "verncularnames.namenoaccent" looks misspelled (missing
			// an 'a', compare "vernacularnames.name") - verify against the actual
			// index field names; as written this second field can never match
			fields = new String[]{"vernacularnames.name", "verncularnames.namenoaccent"};
		}
		// scientific name
		else{
			fields = new String[]{"lookup.calname"};
		}

		try {
		    // load index
			// NOTE(review): reader/searcher are never closed in this method -
			// possible file handle leak; confirm before adding a finally block
			reader = IndexReader.open(InstallationConfig.getInstance().get(InstallationConfig.LUCENE_DIR_KEY) + ApplicationConfig.LUCENE_INDEX);
			searcher = new IndexSearcher(reader);
			
			// parse query
			// add a wildcard character at the end of the search string to match
			// partial words : "carex al*" will match "Carex albicans"
			MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, analyzer);
			parser.setDefaultOperator(Operator.AND);
		    query = parser.parse(searchQuery.replace("-"," ") + "*");

		    // get results sorted alphabetically on the calculated scientific name
		    Filter filter = null;
		    TopFieldDocs topFieldDocs;
		    
		    //new method 
		    //Sort sort = new Sort(new SortField("lookup.calname",SortField.STRING_VAL));
		    //topFieldDocs = searcher.search(query, filter, searcher.maxDoc(), sort);
		    
		    topFieldDocs = searcher.search(query, filter, searcher.maxDoc(), new Sort("lookup.calname"));
		    ScoreDoc scoreDocs[] =  topFieldDocs.scoreDocs;
		    
		    
			// loop over all documents and build json array
			for(ScoreDoc scoreDoc : scoreDocs){
				Document document = searcher.doc(scoreDoc.doc);
			
				// init json object vars
				String taxonId = "";
				String fullScientificName = "";
				String fullScientificNameHtml = "";
				String status = "";
			    String nameid = ""; 
			    String frVnStatus = "";
			    String frVn = "";
			    String frVnHtml = "";
			    String enVnStatus = "";
			    String enVn = "";
			    String enVnHtml = "";
			    
			    // taxonid
			    taxonId = document.getField("id").stringValue();

			    // fullscientificname status (accepted or synonym)
			    status = document.getField("status.status").stringValue();
			    
			    // fullscientificname
			    fullScientificName = document.getField("lookup.calname").stringValue();
			    
			    // fullscientificnamehtml : highlight the matched terms with <strong>
			    fullScientificNameHtml = highlight(searchQuery,document.getField("lookup.calnamehtml").stringValue());

			    // if we are looking for vernacular names
			    
			    if(discriminator == 'f' || discriminator == 'e'){
				    //vernacular names, based on language
				    if(document.getField("vernacularnames.name") != null){
				    	
				    	// loop over all available french / english names based on discriminator and include the ones that match the query
				    	// the three arrays are parallel : entry i of each describes the same vernacular name
				    	int fieldPos = 0;
				    	String lang[] = document.getValues("vernacularnames.language");
				    	String statuses[] = document.getValues("vernacularnames.status.status");
				    	String names[] = document.getValues("vernacularnames.name");
				    	for(fieldPos = 0; fieldPos < lang.length; fieldPos++){
				    		// reinit names and statuses
				    		frVnStatus = "";
				    		frVn = "";
				    		enVn = "";
				    		enVnStatus = "";
				    		
				    		// grab the name at position fieldPos in the vernacularname field, based on discriminator
					    	if(lang[fieldPos].equals(ApplicationConfig.LANGUAGE_FR) && discriminator == 'f'){
					    		frVnStatus = statuses[fieldPos];
				    			frVn = names[fieldPos];	
				    		}
				    		else if(lang[fieldPos].equals(ApplicationConfig.LANGUAGE_EN) && discriminator == 'e'){
					    			enVnStatus = statuses[fieldPos];
				    				enVn = names[fieldPos];
				    		}	
			    			
					    	/* if the name matches the search query, add it the the json object
					    	 * the name has a fullscientificname parent; if that full name exists
					    	 * and no french or english name is attached to it, append it to the
					    	 * existing json object at index pos in jsonArray ; if a name is already
					    	 * appended to full name, insert a new json object in array
					    	 */
					    	
					    	
					    	boolean notfound = true;
					    	if(match(searchQuery,names[fieldPos])){
			    			    int pos = array.size();
			    			    // look for an existing entry for the same taxon whose slot for this
			    			    // language ('f' or 'e') is still empty, so we can merge into it
			    			    for(int j = 0; j<array.size(); j++){
			    			    	JSONObject o = array.get(j);
			    			    	if(o.getString("i").equals(taxonId) && o.getString(String.valueOf(discriminator)).equals("")){
			    			    		if(discriminator == 'e'){
			    			    			// keep the french name already stored on the merged entry
			    			    			frVn = o.getString("f");
				    			    		frVnStatus = o.getString("fs");
				    			    	}
			    			    		notfound = false;
			    			    		pos = j;
			    			    		break;
				    			    }
			    			    }
			    			    
			    			    JSONObject json = new JSONObject();
			    			    //build and add data to JSONarray
			    			    // keys: i=taxon id, s=status, n=name, *h=html highlighted,
			    			    // f/fs=french name/status, e/es=english name/status
			    			    json.put("i",taxonId);
			    			    json.put("s",status);
			    			    json.put("n", fullScientificName);
			    			    // NOTE(review): fullScientificNameHtml was already highlighted
			    			    // above - this second pass may double-wrap; confirm intended
			    			    json.put("nh", highlight(searchQuery,fullScientificNameHtml));
			    			    json.put("fs", frVnStatus);
			    			    json.put("f", frVn);
			    			    json.put("fh", highlight(searchQuery,frVn));
			    			    json.put("es", enVnStatus);
			    			    json.put("eh", highlight(searchQuery,enVn));
			    			    json.put("e", enVn);
			    			    if(notfound == false){
			    			    	// merge : replace the existing entry for this taxon
			    			    	array.set(pos,json);
			    			    }
			    			    else{
			    			    	array.add(json);
			    			    }	
			    			}
				    	}
				    }			    
			    }
			    else{
			    	// scientific name search : one entry per document, vernacular slots left empty
				    JSONObject json = new JSONObject();
				    //build and add data to JSONarray
				    json.put("i",taxonId);
				    json.put("s",status);
				    json.put("n", fullScientificName);
				    json.put("nh", fullScientificNameHtml);
				    json.put("fs", frVnStatus);
				    json.put("f", frVn);
				    json.put("fh", frVnHtml);
				    json.put("es", enVnStatus);
				    json.put("e", enVn);
				    json.put("eh", enVnHtml);
				    //jsonArray.put(json);
				    array.add(json);
			    }
			}
			

		}catch(Exception e){ 
			e.printStackTrace();
			
		}
		return array;
	}
	
	/**
	 * Sorts, in place, the [left,right] range of the result list
	 * alphabetically by the "n" (full scientific name) attribute of each
	 * JSON object.
	 * This replaces a hand-rolled quicksort that could read past the range
	 * bounds (the element at an incremented index was fetched before the
	 * bound was re-checked) and swapped elements even after a JSONException.
	 * Out-of-range arguments are ignored instead of throwing.
	 * @param sortedResults list to sort
	 * @param left index of the first element of the range to sort (inclusive)
	 * @param right index of the last element of the range to sort (inclusive)
	 */
	public static void quicksort(ArrayList<JSONObject> sortedResults, int left, int right){
		if(sortedResults == null || left < 0 || right >= sortedResults.size() || left >= right){
			//nothing to sort (empty range, single element or invalid bounds)
			return;
		}
		//subList gives a view : sorting it sorts the backing list range in place
		Collections.sort(sortedResults.subList(left, right + 1), new Comparator<JSONObject>(){
			public int compare(JSONObject json1, JSONObject json2){
				return extractSortKey(json1).compareTo(extractSortKey(json2));
			}
		});
	}
	
	/**
	 * Returns the "n" attribute used as sort key, or an empty string when the
	 * attribute is missing (the previous implementation only logged the
	 * JSONException and kept going).
	 */
	private static String extractSortKey(JSONObject json){
		try {
			return (String)json.get("n");
		} catch (JSONException e) {
			return "";
		}
	}
	
	/**
	 * Main entry point of the name search : runs searchCore for the requested
	 * name types, sorts the combined results alphabetically and wraps them in
	 * a json packet.
	 * @param searchQuery user supplied search string
	 * @param searchFullScientificName include full scientific name matches
	 * @param searchVernacularName include french and english vernacular name matches
	 * @param highestAcceptableTaxonRank forwarded to searchCore
	 * @return JSON object {"d":[...results...],"t":result count}
	 */
	public static JSONObject search(String searchQuery, boolean searchFullScientificName, boolean searchVernacularName, int highestAcceptableTaxonRank){		
		JSONObject results = new JSONObject();
		JSONArray jsonArray = new JSONArray();
		ArrayList<JSONObject> sortedResults = new ArrayList<JSONObject>();
		//note : the previous version declared an IndexSearcher, an analyzer, a
		//Query and a File here that were never used; they have been removed
		try {
			if(searchFullScientificName){
				searchCore(searchQuery, sortedResults,'n',highestAcceptableTaxonRank);
			}
			
			if(searchVernacularName){
				searchCore(searchQuery, sortedResults,'f',highestAcceptableTaxonRank);
				searchCore(searchQuery, sortedResults,'e',highestAcceptableTaxonRank);
			}
			
			if(sortedResults.size() > 0){
				quicksort(sortedResults,0,sortedResults.size()-1);
			}
			
			for(int i = 0; i < sortedResults.size(); i++){
				jsonArray.put(sortedResults.get(i));
			}
			
			//build final json data packet; the total comes straight from the
			//array we just filled (no need to read it back out of results)
			results.put("d", jsonArray);
			results.put("t", jsonArray.length());

		}catch(Exception e){ 
			e.printStackTrace();
		}

		return results;
	}

	
	/**
	 * Wraps every occurrence of each whitespace-separated token of term found
	 * in string with <strong> tags, case insensitively.
	 * Fixes over the previous version :
	 * - tokens are regex-quoted, so user input containing metacharacters
	 *   ("(", "*", "[", ...) no longer throws PatternSyntaxException
	 * - empty tokens (e.g. from a double space) are skipped; they used to
	 *   match at every position and corrupt the output
	 * - the matched text is quoted for appendReplacement so "$" or "\"
	 *   in the data cannot break the replacement
	 * @param term user supplied search terms, separated by spaces
	 * @param string text (possibly containing <em>/<strong> markup) to highlight
	 * @return string with every token occurrence wrapped in <strong> tags
	 */
	private static String highlight(String term, String string){
		String tokens[] = term.split(" ");
		for(String token : tokens){
			if(token.length() == 0){
				continue;
			}
			// pattern with a look behind ; match only keywords that do not follow a < or / 
			// (or a letter of "em"/"strong") ; this prevents matching the <em> tag itself
			// when the search word is "em"...
			Pattern p = Pattern.compile("(?<![\\</emstrong])(" + Pattern.quote(token) + ")", Pattern.CASE_INSENSITIVE);
			Matcher m = p.matcher(string);
			StringBuffer sb = new StringBuffer();
			while (m.find()) {
				m.appendReplacement(sb, "<strong>" + Matcher.quoteReplacement(m.group()) + "</strong>");
			}
			m.appendTail(sb);
			//feed the highlighted text into the next token's pass
			string = sb.toString();
		}
		return string;
	}

	/**
	 * Case insensitive check of whether string contains term.
	 * term is user input : it is regex-quoted so metacharacters ("(", "*",
	 * "[", ...) are treated literally instead of throwing
	 * PatternSyntaxException at Pattern.compile time.
	 * @param term user supplied search string
	 * @param string text to look in
	 * @return true if term occurs in string, ignoring case
	 */
	private static boolean match(String term, String string){
		Pattern p = Pattern.compile(Pattern.quote(term), Pattern.CASE_INSENSITIVE);
		Matcher m = p.matcher(string);
		return m.find();
	}
	
	
	/**
	 * Compares two strings ignoring letter case and space characters.
	 * Now implements Comparator&lt;Object&gt; instead of the raw Comparator
	 * type ; the compare(Object,Object) signature is unchanged so existing
	 * callers are unaffected. Both arguments must be Strings (ClassCastException
	 * otherwise, as before).
	 */
	public static class CaseInsensitiveComparator implements Comparator<Object> {
	    public int compare(Object element1, Object element2) {
	        String lowerE1 = ((String)element1).toLowerCase().replaceAll(" ","");
	        String lowerE2 = ((String)element2).toLowerCase().replaceAll(" ","");
	        return lowerE1.compareTo(lowerE2);
	  }
	}
	/**
	 * Orders strings alphabetically while ignoring both letter case and any
	 * space characters, e.g. "Carex alba" and "carexalba" compare as equal.
	 */
	public static class CaseInsensitiveComparator2 implements Comparator<String> {
	    public int compare(String element1, String element2) {
	        return normalize(element1).compareTo(normalize(element2));
	    }

	    //strips every space and lower-cases the value before comparing
	    private String normalize(String value) {
	        return value.toLowerCase().replaceAll(" ","");
	    }
	}
}
