package project3;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

/** An Inverted index is responsible for keeping a list of all words, and
 *  the documents that contain those words; preferably with some form of
 *  frequency data also available.
 */
public class InvertedIndex
{
    /** Simple list of all documents known to the index. */
    private final List<Document> documents;
    /**
     * Maps a stem word to the list of documents containing that word.
     * Backed by a case-insensitive TreeMap, so lookups ignore case and
     * iteration order is alphabetical.
     */
    private final Map<String, List<Document>> indexMap;
    /** Stemmer shared across all indexing operations (caches stem results). */
    private final CachingStemmer stemmer;

    /** Builds an empty InvertedIndex. */
    public InvertedIndex()
    {
        indexMap=new TreeMap<String, List<Document>>(String.CASE_INSENSITIVE_ORDER);
        stemmer=new CachingStemmer();
        documents=new ArrayList<Document>();
    }

    /**Adds a document to the inverted index.
     *
     * After adding a document, the document's stem index entry map will be filled.
     *
     * @param doc Document to add to index
     */
    public void addDocument(Document doc)
    {
        System.out.println("Indexing: "+doc.getTitle());

        List<String> stems = tokenizeAndStem(doc);
        Map<String, Integer> stemCount = buildHistogram(stems);
        //determine frequency info for each stem in the document
        for(Map.Entry<String, Integer> stemEntry : stemCount.entrySet())
        {
            String stem=stemEntry.getKey();
            //frequency is number of occurrences / number of unique stems
            //NOTE(review): dividing by the unique-stem count (not total token
            //count) matches the original comment, so it is kept as-is — but
            //confirm this is the intended normalization for ranking.
            double relativeFrequency = stemEntry.getValue() / (double)stemCount.size();

            //single lookup instead of containsKey + get
            List<Document> docsForStem = indexMap.get(stem);
            if(docsForStem == null)
            {
                docsForStem = new ArrayList<Document>();
                indexMap.put(stem, docsForStem);
            }

            //add the document to the list of docs that have the current stem
            docsForStem.add(doc);

            //add the index entry to the document
            IndexEntry ixEntry=new IndexEntry(stemEntry.getValue(), relativeFrequency);
            doc.getStemIndexEntries().put(stem, ixEntry);
        }
        documents.add(doc);
    }

    /**Given a document, tokenizes the document, then replaces each word token
     * with its associated stem token.
     * @param doc Document whose text is tokenized and stemmed
     * @return A list of stems.
     */
    public List<String> tokenizeAndStem(Document doc)
    {
        return tokenizeAndStem(doc.getDocumentText());
    }

    /**Given a text, tokenizes the text, then replaces each word token
     * with its associated stem token.
     * @param text Raw text to tokenize and stem
     * @return A list of stems.
     */
    public List<String> tokenizeAndStem(String text)
    {
        Tokenizer tokenizer = new Tokenizer();
        List<String> tokens = tokenizer.tokenize(text);
        //replace each token in place with its stem
        for(int i=0; i<tokens.size(); i++)
        {
            String token = tokens.get(i);
            tokens.set(i, stemmer.stem(token));
        }
        return tokens;
    }

    /** Returns all documents indexed by this index.
     * @return The live internal list of indexed documents.
     */
    public List<Document> getDocuments()
    {
        return documents;
    }

    /**Given a list of stemmed tokens, builds a map from stem to number of occurrences
     * @param tokens A list of stem tokens to be categorized
     * @return A map from stem to number of occurrences of the stem
     */
    private Map<String, Integer> buildHistogram(List<String> tokens)
    {
        HashMap<String, Integer> histogram=new HashMap<String, Integer>();
        for(String token : tokens)
        {
            //single lookup instead of containsKey + get
            Integer count = histogram.get(token);
            histogram.put(token, count == null ? 1 : count + 1);
        }
        return histogram;
    }

    /**Finds all documents associated with a specific stem word
     * @param stem Stem word to search for
     * @return A List of documents, or a zero length list if nothing is found
     */
    public List<Document> findEntriesForStem(String stem)
    {
        List<Document> docs = indexMap.get(stem);
        //return an empty list rather than null when the stem is unknown
        return docs != null ? docs : new ArrayList<Document>();
    }

    /**Determines which words appear in more than a certain number of
     * documents. Such words would appear often enough that using them to find
     * information is ultimately frivolous.
     *
     * Returns any word that satisfies:
     * docs containing word / number of docs &gt;= max probability
     * @param maxProbability The maximum probability that a single word appears
     * in a document, as compared to the entire collection. On the range 0 to 1.
     * @return A set of words which appear very often in the collection of docs.
     */
    public HashSet<String> findStopWords(double maxProbability)
    {
        double docCount=documents.size();
        HashSet<String> words=new HashSet<String>();
        for(Map.Entry<String, List<Document>> entry : indexMap.entrySet())
            if(entry.getValue().size()/docCount >= maxProbability)
                words.add(entry.getKey());
        return words;
    }
}
