/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package no.ntnu.idi.deid.preprocessor.sentenizer.externaltools;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;

import no.ntnu.idi.deid.config.Constants;

import org.annolab.tt4j.TreeTaggerException;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

/**
 *
 * @author Hans Moen
 */
public class DocumentToDocumentHandler {

    /** Sentence tokenizer, (re)initialized per language in {@link #processDocuments}. */
    SentenceTokenizer st = null;

    /**
     * No-arg constructor. The throws clause is kept for backward compatibility
     * with existing callers even though no initialization is performed here.
     */
    public DocumentToDocumentHandler() throws FileNotFoundException, IOException, TreeTaggerException {
    }

    /**
     * Processes every file in the given folder: splits each document into sentences
     * and writes one sentence per line to a file named {@code sen-<original name>}
     * in the save folder. Section boundaries are marked with
     * {@code Constants.SECTION_SPLITTER} (see {@link #createSentences}).
     *
     * @param readFolder       folder containing the input documents
     * @param saveFolder       folder the processed documents are written to
     * @param language         language code passed to the {@link SentenceTokenizer}
     * @param stemExcludeList  currently unused by the active code path (kept for
     *                         interface compatibility with the commented-out
     *                         stemming/stop-word pipeline)
     * @param abbreviationFile abbreviation list used by the sentence splitter
     * @param readEncoding     charset used to decode the input files (e.g. "UTF-8")
     * @throws FileNotFoundException if an input file cannot be opened
     * @throws IOException           on read/write failure
     * @throws TreeTaggerException   if the sentence tokenizer fails to initialize
     */
    public void processDocuments(String readFolder, String saveFolder, String language,
            String stemExcludeList, String abbreviationFile, String readEncoding)
            throws FileNotFoundException, IOException, TreeTaggerException {
        st = new SentenceTokenizer(language);

        File folder = new File(readFolder);
        if (!folder.isDirectory()) {
            System.out.println("Unable to find folder ... ");
            return;
        }

        // listFiles() returns null on an I/O error (not just an empty array);
        // guard against iterating over null.
        File[] files = folder.listFiles();
        if (files == null) {
            System.out.println("Unable to find folder ... ");
            return;
        }

        int filecounter = 0;
        for (File file : files) {
            InputStreamReader isr = null;
            FileWriter fw = null;
            try {
                isr = new InputStreamReader(new FileInputStream(file), readEncoding);
                // NOTE(review): FileWriter uses the platform default charset for
                // output while input is decoded with readEncoding — confirm this
                // asymmetry is intended before changing it.
                fw = new FileWriter(saveFolder + "/" + "sen-" + file.getName());

                List<String> sentences = createSentences(isr, abbreviationFile);
                for (String sentence : sentences) {
                    fw.append(sentence + "\n");
                }
            } finally {
                // Close both streams even when reading/writing fails, so a single
                // bad document does not leak file handles across the whole batch.
                if (isr != null) {
                    try {
                        isr.close();
                    } catch (IOException ignored) {
                        // best effort: prefer surfacing the original exception
                    }
                }
                if (fw != null) {
                    fw.close();
                }
            }

            filecounter++;
            // Lightweight progress indicator for large document sets.
            if (filecounter % 100 == 0) {
                System.out.println(filecounter + " ... ");
            }
        }
    }

    /**
     * Splits the given document into a list of trimmed sentences.
     * A blank token from the tokenizer is interpreted as a section boundary:
     * {@code Constants.SECTION_SPLITTER} is emitted once, any run of consecutive
     * blank tokens is skipped, and the next non-blank sentence (if any) is kept.
     *
     * @param documentfile     reader over the document content
     * @param abbreviationFile abbreviation list used by {@link DocumentToSentences}
     * @return the list of sentences, possibly interleaved with section markers
     * @throws FileNotFoundException if the abbreviation file cannot be opened
     * @throws IOException           on read failure
     */
    private List<String> createSentences(InputStreamReader documentfile, String abbreviationFile)
            throws FileNotFoundException, IOException {
        List<String> sentences = new ArrayList<String>();
        DocumentToSentences sentTokenizer = new DocumentToSentences(documentfile, abbreviationFile);
        try {
            CharTermAttribute sentCharAttr = sentTokenizer.addAttribute(CharTermAttribute.class);
            while (sentTokenizer.incrementToken()) {
                String sentence = sentCharAttr.toString().trim();
                if (sentence.isEmpty()) {
                    sentences.add(Constants.SECTION_SPLITTER);
                    // Consume the whole run of consecutive blank tokens.
                    while (sentTokenizer.incrementToken()
                            && sentCharAttr.toString().trim().isEmpty()) {
                        // intentionally empty: skipping blank tokens
                    }
                    // The loop above leaves the attribute on either the first
                    // non-blank token or (at end of stream) the last blank one.
                    String next = sentCharAttr.toString().trim();
                    if (!next.isEmpty()) {
                        sentences.add(next);
                    }
                } else {
                    sentences.add(sentence);
                }
            }
        } finally {
            // Ensure the tokenizer is closed even if incrementToken() throws.
            sentTokenizer.close();
        }
        return sentences;
    }

    /**
     * Command-line entry point: segments the hard-coded test corpus into sentences.
     *
     * @param args unused
     * @throws FileNotFoundException if an input file cannot be opened
     * @throws IOException           on read/write failure
     * @throws TreeTaggerException   if the sentence tokenizer fails to initialize
     */
    public static void main(String[] args) throws FileNotFoundException, IOException, TreeTaggerException {
        DocumentToDocumentHandler dh = new DocumentToDocumentHandler();

        // NO_EPJ test corpus; other encodings previously tried: "ISO-8859-1", "MacRoman"
        dh.processDocuments("testfiles/PreProcessorTest/inputfiler",
                "testfiles/PreProcessorTest/setningsoutput",
                "NO", "prepVocabulary.txt", "abbreviations.txt", "UTF-8");
    }
}
