package machinelearning;

import java.io.*;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import weka.core.Instance;

/**
 * Stores data relating to a single text document. <br>
 * Stores a map of words and their frequencies in the document.
 * @author  Anat Nesichi & Moshe Goren
 */
/**
 * Stores data relating to a single text document. <br>
 * Stores a map of words and their frequencies in the document.
 * @author  Anat Nesichi &amp; Moshe Goren
 */
public class Document
{
    /** Full path of the backing file; read once in the constructor. */
    private String fileName;

    /** Word → frequency map built from the (stemmed) document text. */
    public WordMap words;

    /**
     * Opens the file and prepares the document data.
     * @param filename The full path to the document file
     */
    public Document(String filename)
    {
        fileName = filename;
        LoadFileAndPrepareDocument();
    }

    /**
     * Loads the text from the file and performs the pre-processing:
     * lower-casing, tokenization, stop-word removal and stemming.
     * On an I/O error the exception is logged and whatever text was
     * read so far is processed (possibly none).
     */
    private void LoadFileAndPrepareDocument()
    {
        StringBuilder fileText = new StringBuilder();

        // try-with-resources guarantees the reader is closed even if
        // readLine() throws mid-file (the original leaked it on error).
        try (BufferedReader fileRead = new BufferedReader(new FileReader(fileName)))
        {
            String temp;
            while ((temp = fileRead.readLine()) != null)
            {
                // readLine() strips the line terminator, so append a space
                // separator; otherwise the last word of one line would fuse
                // with the first word of the next line into a bogus token.
                // Locale.ROOT keeps lower-casing locale-independent (the
                // stemmers and stop-word list are English).
                fileText.append(temp.toLowerCase(Locale.ROOT)).append(' ');
            }
        }
        catch (IOException ex)
        {
            Logger.getLogger(Document.class.getName()).log(Level.SEVERE, null, ex);
        }

        // Split text into tokens, excluding digits, whitespace and punctuation.
        // The trailing '+' collapses delimiter runs so we don't generate a
        // flood of empty tokens (they were filtered below anyway).
        String[] allWords = fileText.toString().split("[\\d\\W]+");

        words = new WordMap(allWords.length);

        // We can choose one of these "Stemmers" that change a word into its root:
        //PorterStemmer stemmer = new PorterStemmer();
        LancasterStemmer stemmer = new LancasterStemmer();

        for (String currWord : allWords)
        {
            // Skip empty tokens and stop-words
            if (!currWord.isEmpty() && !BuckleyAndSaltonStopWords.isStopWord(currWord))
            {
                String stemmedWord = stemmer.stem(currWord);

                // Fall back to the raw word if stemming produced nothing
                words.Add(stemmedWord.isEmpty() ? currWord : stemmedWord);
            }
        }
    }

    /**
     * Returns the normalized feature vector of this document from a list of
     * feature words: one entry per feature word holding that word's frequency
     * in this document, normalized by {@code DoubleVectorUtils.NormalizeVector}.
     * @param featureWords the ordered list of feature words
     * @return The normalized feature vector (same order as {@code featureWords})
     */
    public Vector<Double> createFeatureVector(ArrayList<String> featureWords)
    {
        // Presize directly instead of a separate ensureCapacity call
        Vector<Double> featureVec = new Vector<Double>(featureWords.size());

        for (String word : featureWords)
        {
            // Autoboxing replaces the deprecated new Double(...) constructor
            featureVec.add((double) words.WordFreq(word));
        }

        return DoubleVectorUtils.NormalizeVector(featureVec);
    }

    /**
     * Fills the attribute values of a Weka {@link Instance} with this
     * document's normalized feature vector, one attribute per feature word.
     * @param featureWords the ordered list of feature words
     * @param in the Weka instance whose attribute values are overwritten
     */
    public void createFeatureInstance(ArrayList<String> featureWords, Instance in)
    {
        Vector<Double> featureVec = createFeatureVector(featureWords);

        int i = 0;
        for (Double feature : featureVec)
        {
            in.setValue(i++, feature.doubleValue());
        }
    }
}
