package Search;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import Classes.Query;
import Classes.Document;
import IndexingLucene.MyIndexReader;

/**
 * Query-likelihood retrieval model with Dirichlet prior smoothing.
 *
 * <p>For a query q and document d, the document score is
 * {@code prod over query tokens w of (tf(w,d) + MU * p(w|C)) / (|d| + MU)},
 * where {@code p(w|C) = cf(w) / corpusSize} is the collection (reference)
 * language model. Documents are ranked by this score, descending.
 */
public class QueryRetrievalModel {

	protected MyIndexReader indexReader;
	/** Dirichlet prior pseudo-count μ: larger values trust the collection model more. */
	private static final double MU = 2000.0;
	/** Total token count of the collection; denominator of p(w|C). Per-instance: each reader may wrap a different index. */
	private final long corpusSize;

	public QueryRetrievalModel(MyIndexReader ixReader) {
		this.indexReader = ixReader;
		this.corpusSize = this.indexReader.getCorpusSize();
	}

	/**
	 * Searches for the topic information.
	 * The returned results (retrieved documents) are ranked by score,
	 * from the most relevant to the least.
	 *
	 * @param aQuery the query to be searched for
	 * @param TopN   the maximum number of returned documents
	 * @return up to {@code TopN} documents, sorted by relevance score descending
	 *         (fewer if fewer documents match any query term)
	 * @throws IOException if the underlying index cannot be read
	 */
	public List<Document> retrieveQuery(Query aQuery, int TopN) throws IOException {
		// docTermFreqs: <docid, <term, tf>> — term frequencies of query terms per candidate doc.
		Map<Integer, HashMap<String, Integer>> docTermFreqs = new HashMap<>();
		// collectionFreqs: <term, cf> — collection frequency of each distinct query term.
		Map<String, Long> collectionFreqs = new HashMap<>();

		// NOTE: simple whitespace tokenization; assumes the query content is
		// already normalized the same way the index was — confirm against indexer.
		String[] tokens = aQuery.GetQueryContent().split(" ");

		// Pass 1: gather postings. Each posting row is [docid, tf] for the current term.
		for (String token : tokens) {
			if (token.isEmpty() || collectionFreqs.containsKey(token)) {
				continue; // skip blank tokens and terms already fetched (duplicates in the query)
			}
			long cf = indexReader.CollectionFreq(token);
			collectionFreqs.put(token, cf);
			if (cf == 0) {
				System.out.println("Token <" + token + "> does not exist in the corpus.");
				continue;
			}
			int[][] postings = indexReader.getPostingList(token);
			for (int[] posting : postings) {
				docTermFreqs.computeIfAbsent(posting[0], k -> new HashMap<>())
				            .put(token, posting[1]);
			}
		}

		// Pass 2: score every candidate document. Iterating the raw token array
		// (duplicates included) is deliberate — repeated query terms contribute
		// one factor per occurrence under the query-likelihood model.
		List<DocScore> scored = new ArrayList<>(docTermFreqs.size());
		for (Map.Entry<Integer, HashMap<String, Integer>> entry : docTermFreqs.entrySet()) {
			int docid = entry.getKey();
			Map<String, Integer> tfMap = entry.getValue();
			// Propagate IOException to the caller instead of swallowing it and
			// scoring with a bogus docLen of 0 (which divides by zero below).
			int docLen = indexReader.docLength(docid);

			double score = 1.0;
			for (String token : tokens) {
				Long cf = collectionFreqs.get(token);
				if (cf == null || cf == 0) {
					continue; // term absent from the corpus: no reference probability mass
				}
				int tf = tfMap.getOrDefault(token, 0);
				// Dirichlet prior smoothing, folded form:
				//   c1*pDoc + c2*pRef = (tf + MU * p(w|C)) / (|d| + MU)
				// which never divides by docLen and so tolerates empty documents.
				double pRef = (double) cf / corpusSize;
				score *= (tf + MU * pRef) / (docLen + MU);
			}
			scored.add(new DocScore(docid, score));
		}

		// Sort from most to least relevant. Double.compare-based ordering keeps
		// the comparator contract (ties -> 0), unlike an always-nonzero result.
		scored.sort(Comparator.comparingDouble(DocScore::getScore).reversed());

		// Clamp to the number of results actually available so a short result
		// list cannot trigger IndexOutOfBoundsException.
		int limit = Math.min(TopN, scored.size());
		List<Document> rankedList = new ArrayList<>(limit);
		for (int i = 0; i < limit; i++) {
			DocScore ds = scored.get(i);
			rankedList.add(new Document(Integer.toString(ds.getId()),
			                            indexReader.getDocno(ds.getId()),
			                            ds.getScore()));
		}
		return rankedList;
	}

	/**
	 * Immutable (docid, score) pair. Static nested: it needs no reference to
	 * the enclosing model instance.
	 *
	 * @author florahan
	 */
	private static class DocScore {

		private final int docid;
		private final double score;

		public DocScore(int docid, double score) {
			this.docid = docid;
			this.score = score;
		}

		public int getId() {
			return this.docid;
		}

		public double getScore() {
			return this.score;
		}

		@Override
		public String toString() {
			return docid + ", " + score;
		}
	}
}