package document;

import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;

import util.Pair;



/**
 * Implementation of the vector space retrieval model.
 *
 * Documents are scored against a query document by the scalar product of
 * tf-idf weighted term vectors. The candidate documents are enumerated with
 * a "scan line" over the posting lists of the query's terms: each posting
 * list is sorted by document ID, so one cursor per list suffices to visit
 * every candidate document exactly once in ascending ID order.
 *
 * @author severin
 */
public class VectorSpaceRetrieval {

	/**
	 * @param index
	 *            The inverted index this retrieval model searches in.
	 */
	public VectorSpaceRetrieval(InvertedIndex index) {
		this.index = index;
		numberOfDocuments = index.getNumberOfDocuments();
	}

	/**
	 * Calculates the scalar product of the document d with all the documents
	 * that occur in the posting lists of the terms in d.
	 *
	 * Algorithm: the documents in each posting list are sorted by ID, so we
	 * keep one cursor per posting list (the "scan line"). In every iteration
	 * the smallest not-yet-processed document ID on the scan line is picked,
	 * its scalar product with d is computed, and every cursor pointing at
	 * that document is advanced.
	 *
	 * The results are accumulated in {@code scoresForDocId}; retrieve them
	 * with {@link #getSimilarDocuments()}.
	 *
	 * @param d
	 *            The query document.
	 */
	public void searchSimilar(Document d) {
		// This represents the current scan line through the postings.
		frontPostings = new Vector<Map.Entry<Integer, Posting>>();

		// One iterator per posting list, so the scan line can be advanced.
		itPostings = new Vector<Iterator<Map.Entry<Integer, Posting>>>();

		// Collects (score, docId) pairs; sorted later in descending score
		// order by getSimilarDocuments().
		scoresForDocId = new Vector<Pair<Double, Integer>>();

		// Fetch the posting list for each term in document d.
		buildPostingListsForDocument(d);

		// Set every cursor of the scan line to the first element of its
		// posting list (or null for an empty list).
		initScanLine();

		while (true) {
			// Smallest unprocessed document ID on the scan line.
			int minDocId = getSmallesDocumentIdInScanLine();

			// MAX_VALUE means every cursor is exhausted. (The previous
			// version recorded one spurious score for Integer.MAX_VALUE
			// before noticing the scan line was empty.)
			if (minDocId == Integer.MAX_VALUE) {
				break;
			}

			// Scalar product of document d with document 'minDocId'; this
			// also advances every cursor that pointed at 'minDocId'.
			double scalarProduct = getScalarProductWith(d, minDocId);

			scoresForDocId.add(new Pair<Double, Integer>(scalarProduct, minDocId));
		}
	}

	/**
	 * Convenience overload: parses the query string into a document (with
	 * uniform term weights) and searches for similar documents.
	 */
	public void searchSimilar(String query) {
		searchSimilar(queryToDocument(query, false));
	}

	/**
	 * Searches with relevance feedback: the effective query is built from
	 * the old query plus feedback documents (see
	 * {@link #relevanceFeedbackToDocument}).
	 */
	public void searchWithRelevance(String oldQuery,
			Vector<Integer> relevantIds, Vector<Integer> irrelevantIds,
			double weightOldQuery, double weightRelevantDocs,
			double weightIrrelevantDocs) {
		searchSimilar(relevanceFeedbackToDocument(oldQuery, relevantIds,
				irrelevantIds, weightOldQuery, weightRelevantDocs,
				weightIrrelevantDocs));
	}

	/**
	 * @return The document IDs of the last search, ordered by descending
	 *         score. Must be called after one of the search methods.
	 */
	public List<Integer> getSimilarDocuments() {
		Collections.sort(scoresForDocId, new CompareScores());
		List<Integer> result = new LinkedList<Integer>();
		for (Pair<Double, Integer> scored : scoresForDocId) {
			result.add(scored.getSecond());
		}
		return result;
	}

	/**
	 * Orders (score, docId) pairs by descending score.
	 */
	private static class CompareScores implements
			Comparator<Pair<Double, Integer>> {

		@Override
		public int compare(Pair<Double, Integer> arg0,
				Pair<Double, Integer> arg1) {
			// Bug fix: the original compared the boxed Doubles with '==',
			// which tests reference identity, not value equality, so equal
			// scores almost never compared as 0. Arguments are swapped to
			// get descending order.
			return Double.compare(arg1.getFirst(), arg0.getFirst());
		}
	}

	/**
	 * Initializes the scan line: each cursor in {@code frontPostings} is set
	 * to the first element of its posting list, or to null if the posting
	 * list is empty.
	 */
	protected void initScanLine() {
		for (int j = 0; j < postingListsOfDocumentTerms.size(); j++) {
			itPostings.add(postingListsOfDocumentTerms.get(j).iterator());
			if (itPostings.get(j).hasNext()) {
				frontPostings.add(itPostings.get(j).next());
			} else {
				frontPostings.add(null);
			}
		}
	}

	/**
	 * @return The smallest document ID any scan-line cursor points at, or
	 *         {@code Integer.MAX_VALUE} when all cursors are exhausted.
	 */
	protected int getSmallesDocumentIdInScanLine() {
		int minDocId = Integer.MAX_VALUE;
		for (int j = 0; j < frontPostings.size(); j++) {
			Map.Entry<Integer, Posting> front = frontPostings.get(j);
			if (front != null && front.getKey() < minDocId) {
				minDocId = front.getKey();
			}
		}
		return minDocId;
	}

	/**
	 * Calculates the scalar product between document d and the document
	 * 'docId' from the scan line, and advances every cursor that pointed at
	 * 'docId'.
	 *
	 * @param d
	 *            The query document.
	 * @param docId
	 *            The id MUST BE contained IN the current SCAN LINE.
	 * @return The scalar product.
	 */
	protected double getScalarProductWith(Document d, int docId) {
		double scalarProduct = 0;
		for (int j = 0; j < frontPostings.size(); j++) {
			Map.Entry<Integer, Posting> front = frontPostings.get(j);

			// Only the terms occurring in document 'docId' contribute.
			if (front != null && front.getKey() == docId) {
				Set<Map.Entry<Integer, Posting>> postings = index
						.getPostings(d.getEntries().get(j).getTerm());
				if (postings != null && !postings.isEmpty()) {
					// Weighting scheme from the lecture:
					// idf = log(N / df), tf-weight = 1 + log(tf).
					double scoreValueOfOtherDocument = Math
							.log(((double) numberOfDocuments)
									/ ((double) postings.size()))
							* (1.0 + Math.log(front.getValue()
									.getTermFrequency()));

					scalarProduct += d.getEntries().get(j).getScore()
							* scoreValueOfOtherDocument;
				}

				// Advance the cursor unconditionally. Previously this only
				// happened when the postings lookup succeeded, so a missing
				// posting list would have stalled the scan line forever.
				if (itPostings.get(j).hasNext()) {
					frontPostings.set(j, itPostings.get(j).next());
				} else {
					frontPostings.set(j, null);
				}
			}
		}
		return scalarProduct;
	}

	/**
	 * Builds the posting lists for all the terms in the document. The
	 * resulting vector is index-aligned with {@code d.getEntries()}.
	 *
	 * @param d
	 *            The document.
	 */
	protected void buildPostingListsForDocument(Document d) {
		postingListsOfDocumentTerms = new Vector<Set<Map.Entry<Integer, Posting>>>();
		for (DocumentEntry e : d.getEntries()) {
			Set<Map.Entry<Integer, Posting>> postings = index.getPostings(e
					.getTerm());
			// Substitute an empty set for unknown terms: storing the null
			// directly made initScanLine() throw a NullPointerException,
			// and filtering the term out would break the index alignment
			// with d.getEntries() that getScalarProductWith() relies on.
			if (postings == null) {
				postings = Collections.emptySet();
			}
			postingListsOfDocumentTerms.add(postings);
		}
	}

	/**
	 * Translates a query string into a query document. Tokens are split on
	 * {@code DELIMITERS}, stop words are dropped, the rest is stemmed, and
	 * only terms known to the inverted index are kept. Scores of repeated
	 * terms accumulate.
	 *
	 * @param query
	 *            The raw query string.
	 * @param useIdfWeights
	 *            If true each term occurrence is weighted with its idf
	 *            (log(N / df)); otherwise each occurrence contributes 1.0.
	 * @return The query document.
	 */
	protected Document queryToDocument(String query, boolean useIdfWeights) {
		Document result = new Document();
		Stemmer stemmer = new Stemmer();

		HashMap<String, Double> termScores = new HashMap<String, Double>();
		String[] tokens = query.split(DELIMITERS);
		for (String token : tokens) {
			if (StopWordLexicon.getInstance().isStopWord(token)) {
				continue;
			}
			String stemmedTerm = stemmer.stem(token);

			// Keep only the words that are in the inverted index.
			Set<Map.Entry<Integer, Posting>> postings = index
					.getPostings(stemmedTerm);
			if (postings == null) {
				continue;
			}

			double score;
			if (useIdfWeights) {
				// Bug fix: the idf lookup previously used the UNSTEMMED
				// token, which could be absent from the index even though
				// the stemmed term (checked above) was present -> NPE.
				score = Math.log(numberOfDocuments
						/ ((double) postings.size()));
			} else {
				score = 1.0;
			}

			Double previous = termScores.get(stemmedTerm);
			termScores.put(stemmedTerm, previous == null ? score : previous
					+ score);
		}

		for (Map.Entry<String, Double> e : termScores.entrySet()) {
			result.addEntry(new DocumentEntry(e.getKey(), e.getValue()));
		}
		return result;
	}

	/**
	 * Generates a new query document taking into account a previous query
	 * and relevance feedback, following the Rocchio algorithm
	 * (http://en.wikipedia.org/wiki/Rocchio_Classification).
	 *
	 * NOTE(review): only the FIRST relevant document is folded in, and
	 * {@code irrelevantDocuments} / {@code weightIrrelevantDocs} are ignored
	 * entirely — confirm whether the full Rocchio sums are still to be
	 * implemented.
	 *
	 * @param oldQuery
	 *            The previous query string.
	 * @param relevantDocuments
	 *            IDs of documents judged relevant (only the first is used).
	 * @param irrelevantDocuments
	 *            IDs of documents judged irrelevant (currently unused).
	 * @return The combined feedback query document.
	 */
	protected Document relevanceFeedbackToDocument(String oldQuery,
			Vector<Integer> relevantDocuments,
			Vector<Integer> irrelevantDocuments, double weightOldQuery,
			double weightRelevantDocs, double weightIrrelevantDocs) {
		Document result = new Document();

		Document oldQueryDoc = queryToDocument(oldQuery, true);
		HashMap<String, Double> termScores = new HashMap<String, Double>();
		for (DocumentEntry entry : oldQueryDoc.getEntries()) {
			termScores.put(entry.getTerm(), entry.getScore() * weightOldQuery);
		}

		// Guard against an empty feedback set; the previous version called
		// relevantDocuments.get(0) unconditionally and threw.
		if (!relevantDocuments.isEmpty()) {
			Document doc = index.getDocumentForId(relevantDocuments.get(0));
			if (doc != null) {
				// Only the 40 highest-scoring terms of the document are used.
				for (DocumentEntry entry : doc.getEntriesSorted(40)) {
					String term = entry.getTerm();
					double weighted = entry.getScore() * weightRelevantDocs;
					Double previous = termScores.get(term);
					if (previous != null) {
						// Term also occurs in the old query: merge weights.
						termScores.put(term, previous + weighted);
					} else {
						result.addEntry(new DocumentEntry(term, weighted));
					}
				}
			}
		}

		for (Map.Entry<String, Double> e : termScores.entrySet()) {
			result.addEntry(new DocumentEntry(e.getKey(), e.getValue()));
		}

		return result;
	}

	/** Number of documents in the index, cached at construction time. */
	protected int numberOfDocuments;

	/** The inverted index this model searches in. */
	protected InvertedIndex index;

	/**
	 * The posting list of each term of the query document; index-aligned
	 * with {@code d.getEntries()} of the document passed to searchSimilar.
	 */
	protected Vector<Set<Map.Entry<Integer, Posting>>> postingListsOfDocumentTerms;

	/**
	 * This represents the current scan line through the postings. A null
	 * entry marks an exhausted posting list.
	 */
	protected Vector<Map.Entry<Integer, Posting>> frontPostings;

	/**
	 * This is a list of iterators for each posting list. So that we can
	 * advance our scan line.
	 */
	protected Vector<Iterator<Map.Entry<Integer, Posting>>> itPostings;

	/**
	 * This can not be a tree, since we may have the same score for different
	 * documents.
	 */
	protected Vector<Pair<Double, Integer>> scoresForDocId;

	/**
	 * Token delimiters: whitespace/punctuation, plus the boundary between a
	 * digit and a following letter.
	 */
	private static final String DELIMITERS = "[\\s.,;:\\-\\(\\)\\!\\?'\"]|(?<=[\\d])(?=[a-zA-Z])";

}
