/* To calculate the tf-idf over the whole document set, we need word statistics
 * both per document and across the corpus. First, the TfIdf class invokes the
 * subroutine in Doc to gather the basic information for each word: the term
 * frequency and the corresponding position array. While processing each document,
 * the Doc class calls back into TfIdf to update corpus-level information, such as
 * the document frequency of each particular word.
 * With the document frequency known, we can then calculate the tf-idf value of
 * each word in each document. Part of our implementation idea comes from Google
 * Code, with our own modifications. http://en.wikipedia.org/wiki/Tf-idf
 *
 * */


import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.TreeMap;

public class TfIdf {

    /** Map from document name (file name without path) to its parsed Doc. */
    public TreeMap<String, Doc> docs;

    /**
     * Per-word corpus statistics. For each word, value[0] holds the document
     * frequency (number of documents containing the word) and value[1] holds
     * the idf computed from it once all documents are processed.
     */
    public TreeMap<String, Double[]> wordSet;

    /**
     * Builds the corpus from every file in the given folder, then computes the
     * idf of every word and finally the tf-idf weights of every document.
     *
     * @param foldername path of the directory containing the document files
     * @throws FileNotFoundException if the path is not a readable directory or
     *         one of the contained files cannot be opened
     */
    public TfIdf(String foldername) throws FileNotFoundException {
        wordSet = new TreeMap<String, Double[]>();
        docs = new TreeMap<String, Doc>();

        // Process each file in the folder; while parsing, Doc calls back
        // addDocFreq() to keep the per-word document frequency up to date.
        File datafolder = new File(foldername);
        String[] files = datafolder.list();
        if (files == null) {
            // File.list() returns null when the path is missing or not a
            // directory; fail with a clear message instead of an NPE below.
            throw new FileNotFoundException("Not a readable directory: " + foldername);
        }
        for (int i = 0; i < files.length; i++) {
            addDoc(foldername + "/" + files[i]);
        }

        // idf = log(N / df) where N is the corpus size. The original code
        // hard-coded N = 10, which is wrong for any other number of documents.
        double totalDocs = docs.size();
        for (String eachword : wordSet.keySet()) {
            Double[] wordinfo = wordSet.get(eachword);
            // The array is already referenced by the map, so mutating it in
            // place is enough; no put() needed.
            wordinfo[1] = Math.log(totalDocs / wordinfo[0]);
        }

        // With the idf table complete, let each document compute its tf-idf.
        for (String eachDoc : docs.keySet()) {
            docs.get(eachDoc).calculateTfIdf(this);
        }
    }

    /**
     * Parses one document file and registers the resulting Doc under its base
     * file name (everything after the last '/').
     *
     * @param filename path of the document file to process
     * @throws FileNotFoundException if the file cannot be opened
     */
    public void addDoc(String filename) throws FileNotFoundException {
        BufferedReader br = new BufferedReader(new FileReader(filename));
        try {
            // NOTE(review): assumes Doc consumes the reader fully inside its
            // constructor — confirm against Doc before relying on this close.
            Doc doc = new Doc(br, this);
            docs.put(filename.substring(filename.lastIndexOf('/') + 1), doc);
        } finally {
            try {
                br.close(); // fix: the reader was previously never closed (handle leak)
            } catch (IOException ignored) {
                // best-effort close; nothing useful to do on failure
            }
        }
    }

    /**
     * Callback invoked by Doc: records that one more document contains the
     * given word (increments its document frequency).
     *
     * @param word the word whose document frequency should be bumped
     */
    public void addDocFreq(String word) {
        Double[] tempdata = wordSet.get(word);
        if (tempdata == null) {
            // First sighting: df = 1; idf (slot 1) is filled in later by the
            // constructor once all documents have been processed.
            wordSet.put(word, new Double[]{1.0, 0.0});
        } else {
            tempdata[0]++; // one more document contains this word
        }
    }

    /** Returns the Doc registered under the given name, or null if absent. */
    public Doc get_doc(String key) {
        return docs.get(key);
    }

    /** Returns the per-word [document frequency, idf] table. */
    public TreeMap<String, Double[]> get_words() {
        return this.wordSet;
    }
}
