package complet.db;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.TreeSet;

import org.neo4j.graphdb.Direction;
import org.neo4j.graphdb.DynamicRelationshipType;
import org.neo4j.graphdb.Node;
import org.neo4j.graphdb.Relationship;
import org.neo4j.graphdb.Transaction;
import org.neo4j.kernel.EmbeddedGraphDatabase;

import complet.helper.ConfigHelper;

/**
 * TODO: Mich wundert dass keine ü's in der datenbank sind, die scheinen zu ue aufgelöst zu sein ... habe extra ne funktion geschrieben die das encoding berücksichtigt
 * TODO: kann ich in neo4j sicherstellen, dass relationen in der reihenfolge in der sie eingefüg werden im getRelationships iterator erscheinen?
 * TODO: retrieval als ein traverser implementieren, weil neo4j das vielleicht am schnellsten kann?
 * 
 * Algo: 
 * we have one global prefix map.
 * for every node we sort relationships descending by edge weight
 * for every end node we look up in how many prefixes it will fit and add it to this prefix edge
 * 
 * heuristic: every edge has to fit at least one prefix (the longest possible prefix) and it will be entered there! If the consecutively shorter prefixes are not yet full, it will also be entered into them.
 * 
 * define a certain maximum number of allowed edges per prefix (I would say something between 50 and 100) could also be tested!
 * 
 * so here is the rule:
 * 1.) start with the most probable word / edge!
 * 2.) find the longest prefix from the global prefix map
 * 3.) enter it to the corresponding edge type (even if amount of edges is already full).
 * 4.) as long as sub prefix edges are not full: also enter it there.
 * 5.) repeat with the next most probable edge
 * 
 * 
 * what about fillings? I can't just stop at "n" relations of a prefix and type. <-- ??!? 
 * 
 * TODO: Funktion, die gegeben ein Wort den längsten prefix sucht
 * TODO: Funktion, die alle relationen eines types absteigend nach edgeweight sortiert ausgibt
 * TODO: vermutlich eine Klasse für relationen und gewichte mit Comparator
 * TODO: Verwaltung von prefix lookups
 * TODO: parameters are not included to look up (can be easy done by multiplication with parameter array BUT it would make sense to save this multiplication and hardcode the parameters!) That is why I did not include them
 * @author rpickhardt
 */
public class TransformDB {
	static EmbeddedGraphDatabase oldDB;
	static ArrayList<String> letters;
	static EmbeddedGraphDatabase newDB;
	static ArrayList<String> gWords;
	static int gcnt = 0;
	
	static TreeMap<Double, String> TopK; 
	
	static HashMap<String,Integer> prefixMap;
	static HashMap<String, Node> newDBWords;
	
	TransformDB(){
		initialize();
	}
	
	/**
	 * instead of calling the constructor in stand alone jave programs
	 */
	private static void initialize(){
		oldDB = new EmbeddedGraphDatabase("/home/rpickhardt/workspace/db_ngrams_ucsc_r5.db");
		newDB = new EmbeddedGraphDatabase("/home/rpickhardt/workspace/db_ngrams_optimized.db");
		gWords = getSortedListOfWords();
		initLetters();
		prefixMap = new HashMap<String,Integer>();
		newDBWords = new HashMap<String, Node>();
	}
	
	/**
	 * initialized the global array letters for backtracking with all the latters 
	 * that are currently saved in the graph db.
	 * 
	 * though very inefficient this method is implemented to respect the correct 
	 * encoding!
	 */
	 private static void initLetters() {
		 letters = new ArrayList<String>();
		 for (String s:gWords){
			 for (int i = 0;i<s.length();i++){
				 String l = "" + s.charAt(i);
				 if (letters.contains(l))continue;
				 else letters.add(l);
			 }
		 }
		 
	}

	public static void main(String[] args) {
		 //oldDB = new EmbeddedGraphDatabase("/home/rpickhardt/workspace/db_ngrams_ucsc_r5.db");
		 initialize();
		 Collections.sort(gWords);
		 generatePrefixMap("",gWords);
		 System.out.println(prefixMap.size());
		 System.out.println(longestPossiblePrefix("versicherungsvertreter") + " " + prefixMap.get(longestPossiblePrefix("versicherungsvertreter")));
		 System.out.println(longestPossiblePrefix("verfassungsschutz") + " " + prefixMap.get(longestPossiblePrefix("verfassungsschutz")));
		 
//		 createNodesOfNewDB();
		 initializeNewDBWordHashMap();		 
//		 TransferOldRelationsToNewDB();
		 
		 //Ich gehe über die
		 //TODO: need better data structure to store result (e.g. a top5 data structure)
		 
		 // could be a hashmap tree is logarithmic look up with string comparisons. we don't need string order!
		 // TODO: check performance with hashMap instead of TreeMap!
		 HashMap<String, Double> result = new HashMap<String, Double>();
		 ResetTopk();
		 Suggestions("gehe",3,"br", result);
		 Suggestions("über",2,"br", result);
		 Suggestions("die",1,"br",result);
		 OutTopk();
		 
		 newDB.shutdown();
		 oldDB.shutdown();
	 }
	private static void OutTopk() {
		// TODO Auto-generated method stub
		
	}

	private static void ResetTopk() {
		TopK = new TreeMap<Double, String>();		
	}
	
	private static void TopkSuggestions(String word, Double weight, int k) {
		TopK.put(weight, word);
//		if (TopK.size()>k)
//TODO:implement method!			TopK.resiz();
	}


	/**
	 * This function is essentially following the "heuristic" described in the class. 
	 * 
	 */
	private static void TransferOldRelationsToNewDB() {
		int nodeCnt=0; 
		Transaction tx = newDB.beginTx();
		 try {
			 for (Node n:oldDB.getAllNodes()){
				 System.out.println((String)(n.getProperty(ConfigHelper.NAME_KEY)+ " ==>\t"));
				 if (nodeCnt++%20==0){
					 System.out.println(nodeCnt);
					 tx.success();
					 tx.finish();
					 tx = newDB.beginTx();
				 }
				 for (int edgeType=1;edgeType<4;edgeType++){
					 System.out.print("\t"+edgeType);
//					 int edgeType = 1;
					 TreeMap<Double, ArrayList<Relationship>> tm = getSortedRelationshipsForNode(n, DynamicRelationshipType.withName("rel:"+edgeType), ConfigHelper.RELLOC_KEY);
					 int cnt = 0;					 
					 Node newNode = newDBWords.get(((String)(n.getProperty(ConfigHelper.NAME_KEY))).toLowerCase());
					 if (newNode==null)continue;
					 // prefix as key, value: hashmap (of all words belonging to the key) key = word value = rank 
					 HashMap<String, HashMap<String, Double>> edges = new HashMap<String, HashMap<String, Double>>();
					 //TODO: can probably ged rid of this very expensive double hashmap by saving node degree as a property or caching node degree / prefix degree otherwise (maybe in prefix map?)
					 for (Double key:tm.descendingKeySet()){
						 String value = (String)(tm.get(key).get(0).getEndNode().getProperty(ConfigHelper.NAME_KEY));
						 String prefix = longestPossiblePrefix(value.toLowerCase());
						 boolean first = true;
						 // loop over shorter becoming prefix
						 do {
							 if (edges.containsKey(prefix)){
								 HashMap<String, Double> tmp = edges.get(prefix);
								 if (first || tmp.size() < 100){ //TODO: IMPORTANT! this constant 100 needs to be changed!
									 tmp.put(value, key);
									 edges.put(prefix, tmp);
									 Node dest = newDBWords.get(value.toLowerCase());
									 if (dest == null)continue;
									 Relationship rel = newNode.createRelationshipTo(dest, DynamicRelationshipType.withName(prefix+":"+edgeType));
									 rel.setProperty("weight", key);
								 }
								 first = false;
							 }
							 else {
								 HashMap<String, Double> tmp = new HashMap<String, Double>();
								 tmp.put(value, key);
								 edges.put(prefix, tmp);
								 Node dest = newDBWords.get(value.toLowerCase());
								 if (newNode != null && dest != null){								 
									 Relationship rel = newNode.createRelationshipTo(dest, DynamicRelationshipType.withName(prefix+":"+edgeType));
									 rel.setProperty("weight", key);
								 }
								 first = false;
							 }
//							 System.out.println(key+ "\t" + value + "\tprefix: " + prefix);
						 //TODO: leere kante "rel:1,..rel:3" einfügen (-:
						 }while (!(prefix = prefix.substring(0, prefix.length()-1)).equals(""));
						 //if (cnt++>5)break;
					 }
				 }
			 }
			 tx.success();
		 }catch(NullPointerException e){
			 e.printStackTrace();
		}finally{
			 tx.finish();
		 }		
	}

	/**
	 * TODO: here it might be usfull to introduce caching of the query s.th. frequent querries don't need to be looked up all the time
	 * TODO: other caching strategie could really cach combined results in the case if longer prefix does not give more information (e.g. user does not take over the words) this can be implemented to network protocoll returning longest possible prefix is given suggestions won't change even if prefix is getting longer! saves server and network bandwidth
	 * @param word
	 * @param edgeType
	 * @param prefix
	 * @return
	 */
	private static void Suggestions(String word, int edgeType, String prefix, HashMap<String, Double> result){
		Node startNode = newDBWords.get(word);
		if (startNode==null)return;
		// get relations with complete prefix of type
		DynamicRelationshipType dynType = DynamicRelationshipType.withName(prefix+":"+edgeType);
		if(!startNode.hasRelationship(dynType, Direction.OUTGOING)){
			dynType = DynamicRelationshipType.withName(longestPossiblePrefix(prefix)+":"+edgeType);
			if(!startNode.hasRelationship(dynType, Direction.OUTGOING)){
				return;
			}
			else {
				//TODO: decide if happy with this (maybe we just don't have suggestions)
				ProcessEdges(startNode.getRelationships(dynType,Direction.OUTGOING), result);
			}
		}
		else {
			ProcessEdges(startNode.getRelationships(dynType,Direction.OUTGOING), result);
		}
	}

	private static void ProcessEdges(Iterable<Relationship> relationships,
			HashMap<String, Double> result) {
		for (Relationship rel:relationships){
			String word = (String)(rel.getEndNode().getProperty("word"));
			Double weight = (Double)rel.getProperty("weight");
			if (result.containsKey(word)){
				weight = weight + result.get(word);
			}
			result.put(word, weight);
			TopkSuggestions(word,weight,5);//TODO: could give any k!!
		}
		// TODO Auto-generated method stub
		
	}

	/**
	 * puts all wordnodes of data base to a hashmap as substitute boost of lucene index
	 * TODO: also needs to fetch prefixNodes to the prefix hashmap 
	 */
	private static void initializeNewDBWordHashMap() {
		newDBWords.clear();
		prefixMap.clear();
		for (Node n: newDB.getAllNodes()){
			if (!n.hasProperty("type"))continue;
			String type = (String)n.getProperty("type");
			if (type.equals("word")){
				newDBWords.put((String)n.getProperty("word"), n);
			}
			if (type.equals("prefix")){
				String prefix = (String)n.getProperty("word");
				Integer count = (Integer)n.getProperty("count");
				prefixMap.put(prefix, count);
			}
		}		
	}

	/**
	 * TODO: getter functions are mandetory that go through all nodes and put them to 
	 * newDBWords and prefixMap Hashmap according to their type "word" or "prefix"
	 */
	private static void createNodesOfNewDB() {
		// put words as nodes to graph DB save them in hashmap
		Transaction tx = newDB.beginTx();
		try {
			for (String word:gWords){
				Node n = newDB.createNode();
				n.setProperty("type","word");
				n.setProperty("word", word);
				newDBWords.put(word, n);
			}
			tx.success();
		}finally{
			tx.finish();
		}
		
		tx = newDB.beginTx();
		try {
			for (String word:prefixMap.keySet()){
				Node n = newDB.createNode();
				n.setProperty("type","prefix");
				n.setProperty("word", word);
				n.setProperty("count", prefixMap.get(word));
			}
			tx.success();
		}finally{
			tx.finish();
		}
		
	}

	/**
	 * calculates a treemap with all weights as keys and array lists of relations (with same weight) as values.
	 * In this way on a later state we can access the relationships with descending weight
	 * @param n
	 * @param dynType
	 * @param weightPropertyKey
	 * @return
	 */
	private static TreeMap<Double, ArrayList<Relationship>> getSortedRelationshipsForNode(Node n, DynamicRelationshipType dynType, String weightPropertyKey){
		TreeMap<Double, ArrayList<Relationship>> tm = new TreeMap<Double, ArrayList<Relationship>>();
		for (Relationship rel:n.getRelationships(dynType, Direction.OUTGOING)){
			double weight = (Double)rel.getProperty(weightPropertyKey);
			if (tm.containsKey(weight)){
				ArrayList<Relationship> tmp = tm.get(weight);
				tmp.add(rel);
				tm.put(weight, tmp);
			}
			else{
				ArrayList<Relationship> tmp = new ArrayList<Relationship>();
				tmp.add(rel);
				tm.put(weight, tmp);
			}
		}
		return tm;
	}
	
	/**
	 * backtracking to generate all prefixs from a sorted list of words
	 * prefixes will be saved to global hashMap "prefixMap"
	 * TODO: prefix map has to be saved to the new data base and has to be persistant in retrieval? for a query it has to be easy to find the longest possible prefix: That is easy: just hashing all possible substrings of the quer
	 * @param prefix current prefix will be extended by a letter every time the function is called
	 * @param words sorted list of words!
	 */
	public static void generatePrefixMap(String prefix, ArrayList<String> words){
		for(String letter:letters){
			int firstIndex = firstIndexOfElementWithPrefix(prefix+letter, words);
			if (firstIndex>-1){
				int lastIndex = lastIndexOfElementWithPrefix(prefix + letter, words);
				if ((lastIndex-firstIndex)<1)continue;
				prefixMap.put(prefix+letter, lastIndex-firstIndex);
				System.out.println(prefix+letter + " " + (lastIndex-firstIndex));
				if ((lastIndex-firstIndex) < 100)continue;
				generatePrefixMap(prefix+letter, new ArrayList<String>(words.subList(firstIndex, Math.min(lastIndex, words.size()))));
			}
		}
	}
	
	/**
	 * returns the longest possible prefix from the prefixMap given a word
	 * @param query - word for which the longest substring is searched
	 * @return longest prefix (which can be translated to a dynamic RelationshipType)
	 */
	public static String longestPossiblePrefix(String query){
		String retValue = "";
		while (!query.equals("")){
			if (prefixMap.get(query)!=null)
				return query;
			query = query.substring(0, query.length()-1);
		}
		return retValue;
	}
	
	/**
	 * basically performing binary search with prefixmatching through an array of words <String>
	 * we want to find the very first element in the array containing this prefix
	 * @param prefix 
	 * @param words
	 * @return returns the array index of the element containing this prefix and -1 otherwise
	 */
	public static int firstIndexOfElementWithPrefix(String prefix, ArrayList<String> words){
		return helperBinSearch(prefix, words, true);
	}

	/**
	 * basically performing binary search with prefixmatching through an array of words <String>
	 * we want to find the very last element in the array containing this prefix
	 * @param prefix 
	 * @param words
	 * @return returns the array index of the element containing this prefix and -1 otherwise
	 */
	public static int lastIndexOfElementWithPrefix(String prefix, ArrayList<String> words){
		return helperBinSearch(prefix, words, false);
	}

	/**
	 * modified binary search to find a prefix in an sorted Array of Strings
	 * if first flag is set true the very first element containing this prefix will be returned
	 * otherwise the very last
	 * @param prefix 
	 * @param words - 
	 * @param first - boolean flag to indicate whether the first or the last element containing the prefix shall be returned
	 * @return returns the array index of the element containing this prefix and -1 otherwise
	 */
	private static int helperBinSearch(String prefix, ArrayList<String> words, boolean first) {
		int l=0;
		int r=words.size();
		int m;
		int retValue = -1;
		
		while (Math.abs(r-l)>2){
			m=(l+r)/2;
			String tmp = words.get(m);
			if (tmp.contains(prefix)){
				//element das prefix containt gefunden (-:
				retValue = m;
				if (first) 	//erstes Element könnte weiter links stehen!
					r = m;
				else 		// erstes Element könnte weiter rechts stehen!
					l = m;
			}
			else {
				if (prefix.compareTo(tmp)<0){
					r = m;
				}
				else if (prefix.compareTo(tmp)>0){
					l = m;
				}
				else { //prefix == tmp!
					retValue = m;
					if (first) 	// erstes Element könnte weiter links stehen!
						r = m; 
					else 		// erstes Element könnte weiter rechts stehen!
						l = m;
				}
			}
		}		
		return retValue;
	}
	
	/**
	 * creates an arraylist<string> with all words used in the old graphDB.
	 * @return arraylist with sorted words
	 */
	public static ArrayList<String> getSortedListOfWords(){
		ArrayList<String> words = new ArrayList<String>();
		for (Node n:oldDB.getAllNodes()){
			//TODO: lowercase raus nehmen?
			words.add(((String)n.getProperty(ConfigHelper.NAME_KEY)).toLowerCase());
		}
		Collections.sort(words);
		return words;
	}

	/**
	 * creates an arraylist<string> with all words used in the old graphDB.
	 * @return arraylist with sorted words
	 */
	public static ArrayList<String> getSortedListOfWordsFromEgoNetwork(Node n, DynamicRelationshipType dynType){
		ArrayList<String> words = new ArrayList<String>();
		for (Relationship rel:n.getRelationships(dynType,Direction.OUTGOING)){
			//TODO: lowercase raus nehmen?
			words.add(((String)rel.getEndNode().getProperty(ConfigHelper.NAME_KEY)).toLowerCase());
		}
		Collections.sort(words);
		return words;
	}
}

class ResultEntry implements Comparable{
	public String word;
	public Double weight;
	@Override
	public int compareTo(Object arg0) {
		return weight.compareTo(((ResultEntry)arg0).weight);
	}
	ResultEntry(String word, Double weight){
		this.word = word;
		this.weight = weight;
	}
}

