package project3;

import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**Determines how well a set of documents matches a query.
 *
 * Uses the following algorithm for determining doc relevance (uses TF-IDF principles):
 *
 * First, given a list of stems wanted in the document, and a list of documents
 * containing those stems, calculate the weight of each query stem according to:
 * Ws = number of occurrences in query / number of words in query
 * IDFs = sqrt(number of docs / (1+number of docs with stem))
 * Weight of the stem is Ws * IDFs
 *
 * Determine length of weighted query vector: sqrt(sum(stem weight^2))
 * Determine length of document vector: sqrt(sum(word frequency ^ 2)) (for all words)
 *
 * Score for a document is defined by sum(stem frequency in doc * weight of stem) / (doc vector length * query vector length),
 * which <b>should</b> be roughly the cosine of the angle between the two vectors
 */
public class Goodness
{
    private Collection<Document> docs;
    // Cache of document -> goodness score, computed once in the constructor.
    // NOTE(review): exposed directly via getDocs(); callers can mutate it.
    Map<Document, Double> m = new HashMap<Document, Double>();
    // Euclidean length of the weighted query vector; always >= 1 for an empty query
    // (see calculateQueryLength) so it is never 0 when used as a divisor.
    double queryLength;
    // Map from query stem to its TF-IDF weight, built by calculateStemWeights.
    Map<String, Double> stemWeights;

    /**Builds a Goodness function over a document collection for one query.
     *
     * Computes per-stem TF-IDF weights, the query vector length, and then
     * eagerly scores every document so lookups via {@link #getDocs()} are cheap.
     *
     * @param docs Documents to apply goodness function to
     * @param _query Query to build goodness function with
     */
    public Goodness(Collection<Document> docs, List<String> _query)
    {
        this.docs = docs;
        calculateStemWeights(_query);
        calculateQueryLength(stemWeights);

        for(Document d : docs)
            m.put(d, getGoodness(d));
    }

    /**Calculates the length of the query vector, given the weight of present stems,
     * and stores it in {@link #queryLength}.
     *
     * An empty query gets length 1 so later divisions by queryLength are safe.
     *
     * @param stemWeights Map from stem to weight of the stem in query
     */
    private void calculateQueryLength(Map<String, Double> stemWeights)
    {
        queryLength = 0;
        if(stemWeights.isEmpty()) queryLength = 1;
        for(double weight : stemWeights.values())
        {
            queryLength += weight * weight;
        }
        queryLength = Math.sqrt(queryLength);
    }

    /** Determines how well a document matches a query by attempting to
     * find the cosine of the angle between the document vector and query vector.
     * Follows from Linear Algebra, cos(theta) = a dot b / (length a * length b)
     *
     * Also includes a factor of how well the document matched all query terms,
     * to give documents with all terms but relatively low relative frequencies
     * better weights than shorter documents with fewer terms.
     *
     * @param document Document to try and match goodness for
     * @return How well the document matches the query; 0 for a document with an
     *         empty (zero-length) vector
     */
    public double getGoodness(Document document)
    {
        int matchingTerms = 0;
        double querySum = 0;
        //calculate a dot b (query sum), counting how many query stems the doc contains
        for(Map.Entry<String, IndexEntry> entries : document.getStemIndexEntries().entrySet())
        {
            IndexEntry entry = entries.getValue();
            if(stemWeights.containsKey(entries.getKey()))
            {
                matchingTerms++;
                querySum += entry.relativeFrequency * stemWeights.get(entries.getKey());
            }
        }
        double docLength = document.getDocumentVectorLength();
        //an empty document has a zero-length vector; score it 0 rather than
        //letting the division produce NaN and poison the score map
        if(docLength == 0)
            return 0;
        return matchingTerms * querySum / (queryLength * docLength);
    }

    /**Simple ratio helper.
     *
     * @param x Numerator
     * @param y Denominator
     * @return x divided by y
     */
    public static double getGoodness(double x, double y)
    {
        return x / y;
    }

    /**Determines the importance of each stem in a query.
     *
     * Starts by performing a simple calculation to determine the weight of a
     * given stem in a query. Assumes each word in query has equal weight, and
     * if a word is repeated, the weight of the stem is increased.
     *
     * Once this simple weighting is calculated, performs an IDF analysis
     * on all stems, the resulting weights being the original weight * IDF factor.
     */
    private void calculateStemWeights(List<String> queryStems)
    {
        stemWeights = new HashMap<String, Double>();
        for(String stem : queryStems)
        {
            //if we haven't seen this stem, init its weight to 0;
            //   then add to it the weight for an individual stem
            if(!stemWeights.containsKey(stem))
                stemWeights.put(stem, 0.0);
            stemWeights.put(stem, stemWeights.get(stem) + (1.0 / queryStems.size()));
        }
        adjustForIDF(stemWeights);
    }

    /**Adjusts stem weights by accounting for IDF. Roughly speaking:
     *
     * IDF for stem = sqrt( number of docs / (number of docs with stem + 1))
     * Final weight of stem = original weight of stem * IDF for stem
     */
    private void adjustForIDF(Map<String, Double> stemWeights)
    {
        double docCount = docs.size();
        for(Map.Entry<String, Double> entry : stemWeights.entrySet())
        {
            //starts at 1 to implement the "+1" smoothing in the denominator,
            //which also guards against division by zero for unseen stems
            int docsWithStemPlusOne = 1;
            for(Document d : docs)
                if(d.getStemIndexEntries().containsKey(entry.getKey()))
                    docsWithStemPlusOne++;
            double inverseDocFrequency = Math.sqrt(docCount / docsWithStemPlusOne);
            entry.setValue(entry.getValue() * inverseDocFrequency);
        }
    }

    /**@return A map from document to goodness value.
     * NOTE(review): this is the internal map, not a copy — mutations by the
     * caller will corrupt this object's state. Consider returning an
     * unmodifiable view if no caller relies on mutating it.
     */
    public Map<Document, Double> getDocs()
    {
        return m;
    }
}