/* This program runs the main loop of query expansion.
 * Query expansion consists of three major parts:
 * 1. weightVector(List<Integer> relevance, TfIdf tf, Double negWeight)
 *    calculates a weight for each word appearing in the corpus documents and
 *    returns the weights of all words.
 * 2. queryExpansion(List<String> query, TreeMap<String, Double> vector)
 *    decides, based on the result of weightVector(), which term(s) should be
 *    added to the query for the next iteration. If the second-best term's
 *    weight is smaller than 3/4 of the best term's weight, the second term
 *    is not added.
 * 3. fuzzyOrder(List<String> query, TfIdf tf, List<Integer> relevance)
 *    decides the order in which the query terms are put into the next
 *    iteration. Instead of a topological sort (which fails when the
 *    precedence graph contains a cycle), we use our own algorithm: first
 *    compute the minimum distance between each pair of query words across
 *    all relevant documents, then, comparing those distances, decide at
 *    each iteration which word to place next in the query.
 */


import java.util.*;
public class RelFeed {

	// Upper bound (exclusive) on the positional window considered meaningful.
	// Occurrence pairs farther apart than this are ignored; the same bound is
	// used both as the initial "no pair found" distance in minDistance() and
	// as the cutoff in fuzzyOrder().
	private static final int WINDOW_SIZE = 6;

	/**
	 * Computes the minimum signed positional distance between two terms over
	 * all relevant documents. There may be many position pairs between the
	 * two words; we argue that only the closest pair carries meaningful
	 * information, so only that (signed) offset is kept. fuzzyOrder() uses
	 * this distance to order the query terms.
	 *
	 * @param s1        first term
	 * @param s2        second term
	 * @param tf        index providing per-document word/position data
	 * @param relevance ids of the documents judged relevant
	 * @return the signed offset (pos(s1) - pos(s2)) of the closest pair, or
	 *         WINDOW_SIZE if no pair closer than the window was found
	 */
	public static int minDistance(String s1, String s2, TfIdf tf, List<Integer> relevance){

		int distance = WINDOW_SIZE;

		for(Integer i : relevance){

			Doc doc = tf.get_doc(i.toString() + ".txt");

			// Both terms must occur in the document for a distance to exist.
			if(doc.words.containsKey(s1) && doc.words.containsKey(s2)){

				IndexNode node1 = doc.words.get(s1);
				IndexNode node2 = doc.words.get(s2);

				for(Integer t : node1.get_positionArray()){
					for(Integer j : node2.get_positionArray()){
						if(Math.abs(t - j) < Math.abs(distance)){
							distance = t - j;   // keep the signed offset
						}
					}
				}
			}
		}
		return distance;
	}

	/**
	 * Greedily reorders the query terms using pairwise minimum distances
	 * (see the readme for the motivation of fuzzyOrder). On each pass, the
	 * current candidate is replaced by any term that sits a small positive
	 * distance after it; the winner is appended to the result and removed
	 * from the working list.
	 *
	 * NOTE: this method mutates (empties) the {@code query} list it is given.
	 *
	 * @param query     terms to order (consumed by this call)
	 * @param tf        index providing positional data
	 * @param relevance ids of the relevant documents
	 * @return the terms in fuzzy order
	 */
	public static List<String> fuzzyOrder(List<String> query, TfIdf tf, List<Integer> relevance){

		List<String> returnQuery = new ArrayList<String>();
		int size = query.size();

		for(int k = 0; k < size; k++){

			String min = query.get(0);

			for(String s : query){
				// Compute once per pair (the original scanned all position
				// pairs twice per comparison).
				int d = minDistance(min, s, tf, relevance);
				if(d > 0 && d < WINDOW_SIZE){
					min = s;
				}
			}

			returnQuery.add(min);
			query.remove(min);
		}
		return returnQuery;
	}

	// Calculates the closest signed distance of two terms within a single
	// document. NOTE(review): currently unused — minDistance() above performs
	// the same pair scan inline across all relevant documents. Kept because
	// only part of the project is visible here.
	private static int distance(IndexNode node1, IndexNode node2){

		int windowSize = 5;

		for(Integer i : node1.get_positionArray()){
			for(Integer j : node2.get_positionArray()){
				if(Math.abs(i - j) < Math.abs(windowSize)){
					windowSize = i - j;
				}
			}
		}
		return windowSize;
	}

	/**
	 * Builds the term-weight vector in the spirit of Rocchio / Latent
	 * Semantic Analysis: each term's tf-idf in a relevant document adds
	 * weight (boosted by 1+negWeight); each term's tf-idf in any of the 10
	 * result documents subtracts weight (scaled by negWeight). Terms with
	 * the highest resulting weight are the candidates for query expansion.
	 *
	 * @param relevance ids of the documents judged relevant
	 * @param tf        index providing per-document tf-idf values
	 * @param negWeight penalty factor for terms in the (whole) result set
	 * @return map from term to accumulated weight
	 */
	public static TreeMap<String, Double> weightVector(List<Integer> relevance, TfIdf tf, Double negWeight){

		TreeMap<String, Double> calWeight = new TreeMap<String, Double>();

		// For every relevant document, add the corresponding boosted tf-idf
		// weight of each word in that doc to the vector.
		for(Integer i : relevance){

			Doc doc = tf.get_doc(i.toString() + ".txt");

			for(String word : doc.words.keySet()){
				double delta = doc.words.get(word).get_tf_idf() * (1 + negWeight);
				Double prev = calWeight.get(word);
				calWeight.put(word, prev == null ? delta : prev + delta);
			}
		}

		// Correspondingly, every term in the 10 result documents (0.txt ..
		// 9.txt — relevant ones included, matching the original behavior)
		// has the scaled weight deducted.
		for(int j = 0; j < 10; j++){

			Doc doc = tf.get_doc(j + ".txt");

			for(String word : doc.words.keySet()){
				double delta = doc.words.get(word).get_tf_idf() * negWeight;
				Double prev = calWeight.get(word);
				calWeight.put(word, prev == null ? -delta : prev - delta);
			}
		}

		return calWeight;
	}

	/**
	 * Selects the one or two highest-weighted terms from the vector and
	 * appends them to the query. The second term is only added when its
	 * weight exceeds 3/4 of the best term's weight.
	 *
	 * @param query  current query (mutated: expanded terms are appended)
	 * @param vector term weights from weightVector()
	 * @return the same (expanded) query list
	 */
	public static List<String> queryExpansion(List<String> query, TreeMap<String, Double> vector){

		double max = -5;
		double secondMax = -5;
		String firstTerm = "";
		String secondTerm = "";

		for(String s1 : vector.keySet()){

			double w = vector.get(s1);

			if(w > max){
				// BUG FIX: the displaced best term's weight must become the
				// second-best weight; the original only shifted the term
				// name, leaving secondMax stale, so the 3/4 threshold below
				// compared against the wrong value.
				secondMax = max;
				secondTerm = firstTerm;
				max = w;
				firstTerm = s1;
			}
			else if(w > secondMax){
				secondMax = w;
				secondTerm = s1;
			}
		}

		query.add(firstTerm);

		if(secondMax > 3 * max / 4){	// only when the second term's weight is
										// big enough is it added to the query.
			query.add(secondTerm);
			System.out.println("Augumenting by "+firstTerm+secondTerm);
		}
		else{
			System.out.println("Augumenting by "+firstTerm);
		}

		return query;
	}

	/**
	 * Joins the query terms with "%20" so the string can be embedded in the
	 * API request URL.
	 *
	 * @param stringList query terms
	 * @return "%20"-separated string; empty string for an empty list
	 *         (the original threw IndexOutOfBoundsException on empty input)
	 */
	public static String listToString(List<String> stringList){

		if(stringList.isEmpty()){
			return "";
		}

		StringBuilder sb = new StringBuilder(stringList.get(0));
		for(int i = 1; i < stringList.size(); i++){
			sb.append("%20").append(stringList.get(i));
		}
		return sb.toString();
	}

	/**
	 * Prints the query parameters and the request URL that will be issued.
	 *
	 * @param api   client API key
	 * @param query "%20"-encoded query string
	 * @param d     desired precision
	 */
	public static void inform(String api, String query, Double d){

		System.out.println("Parameters:===>");
		System.out.println("Client Key: "+api);
		System.out.println("Query:\t"+query.replace("%20", " "));
		System.out.println("Precision:\t"+d);
		String request="http://boss.yahooapis.com/ysearch/web/v1/"+query+"?appid="+api+"&format=xml";
		System.out.println("URL:\t"+request);
	}

	/**
	 * Prints the feedback summary for one iteration: the query, the achieved
	 * precision, and whether the desired precision has been reached.
	 *
	 * @param resultPre achieved precision for this iteration
	 * @param req       "%20"-encoded query that was issued
	 * @param Precision desired precision
	 */
	public static void feedback(Double resultPre, String req, Double Precision){

		System.out.println("=====================");
		System.out.println("Feed Back Summary");
		System.out.println("Query: "+ req.replace("%20", " "));
		System.out.println("Precision  "+resultPre);

		if(resultPre >= Precision){
			System.out.println("Desired Precision reached, done");
		}
		else{
			System.out.println("Sorry, still below the desired precision"+Precision);
		}
	}

	/**
	 * Entry point. Expected arguments:
	 *   queryTerm1 [queryTerm2 ...] precision apiKey
	 * Issues the initial query, then repeatedly expands and reorders it
	 * until the desired precision is reached or no relevant results remain.
	 */
	public static void main(String args[]) throws Exception{

		double negWeight = 1;

		// Robustness: the original threw a raw exception on too few args.
		if(args.length < 3){
			System.out.println("Usage: RelFeed <query terms...> <precision> <apiKey>");
			return;
		}

		List<Integer> relevance = new ArrayList<Integer>();
		List<String> query = new ArrayList<String>();
		TreeMap<String, Double> vector;
		TfIdf tf = null;

		// Everything before the last two arguments is the query text.
		String s = args[0];
		for(int i = 1; i < args.length - 2; i++){
			s = s + " " + args[i];
		}
		Double precision = Double.parseDouble(args[args.length - 2]);
		String api = args[args.length - 1];

		StringTokenizer tokens = new StringTokenizer(s, " ");
		while(tokens.hasMoreTokens()){
			query.add(tokens.nextToken());
		}

		String re = listToString(query);

		inform(api, re, precision);

		// XML parser: returns the ids of the documents marked relevant.
		relevance = XML.reqResp(re, api);

		double resultPre = (double) relevance.size() / 10;
		feedback(resultPre, re, precision);

		while(resultPre < precision && resultPre != 0){

			tf = new TfIdf("input/");

			vector = weightVector(relevance, tf, negWeight);
			query = queryExpansion(query, vector);
			query = fuzzyOrder(query, tf, relevance);

			String req = listToString(query);

			inform(api, req, precision);

			relevance = XML.reqResp(req, api);

			resultPre = (double) relevance.size() / 10;
			feedback(resultPre, req, precision);

			if(resultPre == 0){
				System.out.println("Query expansion failed, task being terminated");
				System.exit(0);
			}
		}
	}
}


