/* This class handles document processing.
 * For each document, it first takes the input text as a string,
 * tokenizes it, and separates the input into words. For each word we
 * build a map entry, where the word is the key and an IndexNode is the value.
 * The IndexNode contains information such as term frequency, occurrence count,
 * and tf_idf.
 * We calculate the occurrence count and the tf first. After all the documents
 * have been processed, we then calculate the tf_idf value.
 *
 * First, we split the text into words and iterate through them to update each
 * word node's information, starting with the occurrence count.
 * */
import java.io.BufferedReader;
import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;
import java.util.TreeMap;

public class Doc {
        // word -> per-word index data: occurrence count n_ij, tf_ij, tf_idf, position list
        public TreeMap<String, IndexNode> words;
        int sumof_n_kj;      // total number of counted words in this document (sum over k of n_kj)
        double vectorlength; // Euclidean norm of the tf_idf vector; set by calculateTfIdf

        /**
         * Reads the whole document from {@code br}, tokenizes each line, and builds
         * the word-to-IndexNode map. Records each word's occurrences/positions,
         * then computes the term frequency of every word and registers each word
         * with {@code parent} for corpus-level document-frequency counting.
         *
         * @param br     reader positioned at the start of the document text
         * @param parent corpus accumulator that tracks document frequencies
         */
        public Doc(BufferedReader br, TfIdf parent) {
                int position = 0;
                sumof_n_kj = 0;
                vectorlength = 0;
                words = new TreeMap<String, IndexNode>();

                try {
                        // First pass: count the occurrences (and positions) of each word.
                        String line = br.readLine();
                        while (line != null) {
                                StringTokenizer tokens =
                                        new StringTokenizer(line, ":; \"\',.[]{}()!?-/");
                                while (tokens.hasMoreTokens()) {
                                        // position advances for every token, including the
                                        // short ones skipped below, so stored positions
                                        // reflect the raw token stream
                                        position++;
                                        // BUG FIX: String.trim() returns a new string; the
                                        // original called it and discarded the result.
                                        String word = tokens.nextToken().toLowerCase().trim();
                                        if (word.length() < 2) continue; // skip 1-char tokens

                                        // Fetch-or-create the node; IndexNode is mutated in
                                        // place, so no re-put is needed for existing keys.
                                        IndexNode tempdata = words.get(word);
                                        if (tempdata == null) {
                                                tempdata = new IndexNode();
                                                words.put(word, tempdata);
                                        }
                                        tempdata.add(position); // record this occurrence

                                        sumof_n_kj++;
                                }
                                line = br.readLine();
                        }
                } catch (IOException e) {
                        // Best effort: keep whatever was read before the failure.
                        e.printStackTrace();
                }

                // Second pass: with the total word count known, compute each word's
                // term frequency and report the word to the corpus accumulator.
                for (String word : words.keySet()) {
                        IndexNode tempdata = words.get(word);
                        tempdata.caltf(sumof_n_kj);  // tf = n_ij / sum_k n_kj
                        parent.addDocFreq(word);     // corpus document frequency
                }
        }

        /**
         * Computes tf_idf for every word from the corpus statistics held by
         * {@code parent}, and sets {@code vectorlength} to the Euclidean norm of
         * the resulting tf_idf vector. Must be called only after every document
         * has been processed, so that document frequencies are final.
         *
         * @param parent corpus data; {@code parent.wordSet.get(word)[1]} appears
         *               to hold the idf factor passed to IndexNode.set_tf_idf —
         *               NOTE(review): confirm against the TfIdf class
         */
        public void calculateTfIdf(TfIdf parent) {
                for (String word : words.keySet()) {
                        Double[] corpusdata = parent.wordSet.get(word);
                        IndexNode worddata = words.get(word);
                        worddata.set_tf_idf(corpusdata[1]);
                        vectorlength += worddata.get_tf_idf() * worddata.get_tf_idf();
                        // worddata is a live reference into the map; no re-put needed
                }
                vectorlength = Math.sqrt(vectorlength);
        }

}