/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package no.ntnu.idi.deid.preprocessor.sentenizer.externaltools;

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.annolab.tt4j.TokenHandler;
import org.annolab.tt4j.TreeTaggerException;
import org.annolab.tt4j.TreeTaggerWrapper;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.de.GermanAnalyzer;
import org.apache.lucene.analysis.no.NorwegianAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;

/**
 *
 * @author  Hans Moen
 */
/**
 * Tokenizes sentences and normalizes their terms for a given language mode.
 *
 * <p>Supported modes (selected by the constructor argument's prefix/suffix):
 * <ul>
 *   <li>{@code DE-Stem}/{@code EN-Stem}: lemmatization via a locally installed
 *       TreeTagger (tt4j wrapper);</li>
 *   <li>{@code NO-Stem}: stemming via Lucene's {@code NorwegianAnalyzer}
 *       (Snowball), with an optional stem-exclusion word list;</li>
 *   <li>{@code NO}: plain tokenization, numbers removed;</li>
 *   <li>a {@code ...StemSWRem} suffix additionally removes stopwords.</li>
 * </ul>
 *
 * <p>NOTE(review): this class keeps per-sentence state in instance fields and
 * is therefore not thread-safe; one instance must not be shared across threads.
 *
 * @author Hans Moen
 */
public class SentenceTokenizer {

    // Location of the local TreeTagger installation, read by tt4j through the
    // "treetagger.home" system property.
    // NOTE(review): hard-coded user-specific path — should be made configurable.
    String property = System.setProperty("treetagger.home", "/Users/hansmoe/TreeTagger");

    @SuppressWarnings("rawtypes")
    TreeTaggerWrapper tt = new TreeTaggerWrapper<String>();

    // token -> lemma map; currently only cleared, never populated (kept for
    // compatibility with older code paths).
    final Map<String, String> wordTable = new LinkedHashMap<String, String>();

    // Accumulates the processed (lemmatized/stemmed) tokens of the current
    // sentence; filled either by the TreeTagger handler installed in the
    // constructor or directly by the Norwegian branches of stemSentence().
    final ArrayList<String> wordTableNew = new ArrayList<String>();

    Reader readerBasic = null;
    Reader readerStem = null;

    TokenStream tokenStreamBasic = null;
    TokenStream tokenStreamStem = null;

    CharTermAttribute charTermAttributeBasic = null;
    CharTermAttribute charTermAttributeStem = null;

    // Language/processing mode, e.g. "DE-Stem", "EN-StemSWRem", "NO", "NO-Stem".
    String language = "";

    /**
     * Creates a tokenizer for the given language/processing mode and, for
     * German and English, loads the matching TreeTagger model.
     *
     * @param lan language mode, e.g. {@code "DE-Stem"}, {@code "EN-StemSWRem"}
     *        or {@code "NO"}; an unrecognized value leaves the TreeTagger
     *        wrapper without a model or handler
     * @throws IOException if the TreeTagger model cannot be loaded
     */
    @SuppressWarnings("unchecked")
    public SentenceTokenizer(String lan) throws IOException {
        language = lan;

        if (language.startsWith("DE-Stem")) {
            // NOTE(review): hard-coded model path and Latin-1 encoding.
            tt.setModel("/Users/hansmoe/TreeTagger/lib/german.par:iso8859-1");

        } else if (language.startsWith("EN-Stem")) {
            tt.setModel("/Users/hansmoe/TreeTagger/lib/english.par:iso8859-1");

        } else if (language.startsWith("NO")) {
            // No TreeTagger model exists for Norwegian; stemming is done with
            // a Lucene Snowball stemmer instead, so there is no model to load.

        } else {
            // Unknown mode: nothing to configure.
            return;
        }

        // Collect the lemma of every token TreeTagger emits into wordTableNew.
        tt.setHandler(new TokenHandler<String>() {
            @Override
            public void token(String token, String pos, String lemma) {
                try {
                    wordTableNew.add(lemma);
                } catch (Exception ex) {
                    Logger.getLogger(SentenceTokenizer.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        });
    }

    /**
     * Reads a list of words from a file into a {@code HashSet}. All incoming
     * words are optionally transformed to lower case. Lines in frequency-list
     * format ({@code word\tcount}) are truncated at the first tab; lines
     * starting with {@code #} are treated as comments and skipped.
     *
     * @param vocabularyfile filename of the file containing the list of words
     * @param lowercase {@code true} if all words should be converted to
     *        lower case, {@code false} if the original casing should be kept
     * @return a {@code HashSet} containing the words; empty if the file is
     *         missing or unreadable
     */
    private static HashSet<String> readVocabulary(String vocabularyfile, boolean lowercase) {
        HashSet<String> wordSet = new HashSet<String>();
        // try-with-resources so the reader is also closed on exceptions
        // (the original leaked it on any IOException).
        try (BufferedReader fr = new BufferedReader(new FileReader(vocabularyfile))) {
            String word;
            while ((word = fr.readLine()) != null) {
                // Frequency-list format: keep only the word before the tab.
                if (word.indexOf("\t") > 0) {
                    word = word.split("\t")[0];
                }
                // Do not add comment lines.
                if (!word.startsWith("#")) {
                    wordSet.add(lowercase ? word.toLowerCase() : word);
                }
            }
        } catch (FileNotFoundException fnfe) {
            // Bug fix: the original message printed the (empty) word set
            // instead of the file name.
            System.err.println("Warning: vocabulary file '" + vocabularyfile + "' not found.");
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
        return wordSet;
    }

    /**
     * Lemmatizes (DE/EN via TreeTagger) or stems (NO via Lucene) a sentence.
     *
     * <p>The result is accumulated in {@link #wordTableNew}, which is cleared
     * at the start of every call; purely numeric/symbolic terms are replaced
     * by empty strings.
     *
     * @param sentence the sentence to process (lower-cased internally)
     * @param stemExclusionFilename file with words that must not be stemmed
     *        (only used for Norwegian)
     * @return the processed token list, or {@code null} for an unknown language
     * @throws FileNotFoundException if {@code empty.txt} is missing
     * @throws IOException on tokenization errors
     * @throws TreeTaggerException on TreeTagger failures
     */
    @SuppressWarnings({ "deprecation", "resource", "unchecked" })
    public ArrayList<String> stemSentence(String sentence, String stemExclusionFilename) throws FileNotFoundException, IOException, TreeTaggerException {
        sentence = sentence.toLowerCase();
        wordTable.clear();
        wordTableNew.clear();

        List<String> tokensBasic = new ArrayList<String>();
        List<String> tokensStem = new ArrayList<String>();

        Analyzer basicAnalyzer = null;
        Analyzer analyzer = null;

        if (language.startsWith("DE-Stem")) { // German text
            // Tokenize only; lemmatization is done by TreeTagger below.
            // "empty.txt" is an empty stopword file, i.e. no words are removed.
            analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, new FileReader("empty.txt"));

        } else if (language.startsWith("EN-Stem")) { // English text
            analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, new FileReader("empty.txt"));

            // Expand common English contractions before tokenizing.
            sentence = sentence.replaceAll("don't", "do not");
            sentence = sentence.replaceAll("i'll", "i will");
            sentence = sentence.replaceAll("won't", "will not");
            sentence = sentence.replaceAll("doesn't", "does not");
            sentence = sentence.replaceAll("it's", "it is");
            sentence = sentence.replaceAll("'s", "");
            // NOTE(review): this also removes the surrounding spaces and so
            // glues the neighbouring words together — confirm this is intended.
            sentence = sentence.replaceAll(" s ", "");

        } else if (language.startsWith("NO")) { // Norwegian text
            // Snowball stemming via NorwegianAnalyzer, with an exclusion list
            // of words that must not be stemmed.
            HashSet<String> stemExclude = readVocabulary(stemExclusionFilename, true);
            CharArraySet cas = new CharArraySet(Version.LUCENE_CURRENT, stemExclude, true);

            basicAnalyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, new FileReader("empty.txt"));
            analyzer = new NorwegianAnalyzer(Version.LUCENE_CURRENT, null, cas);

        } else {
            System.out.println("\nWrong language ... ");
            return null;
        }

        readerStem = new StringReader(sentence);
        tokenStreamStem = analyzer.tokenStream("", readerStem);
        charTermAttributeStem = tokenStreamStem.addAttribute(CharTermAttribute.class);

        String term = "";

        if (language.startsWith("NO")) {
            // Additionally collect the unstemmed ("basic") token sequence.
            readerBasic = new StringReader(sentence);
            tokenStreamBasic = basicAnalyzer.tokenStream("", readerBasic);
            charTermAttributeBasic = tokenStreamBasic.addAttribute(CharTermAttribute.class);

            while (tokenStreamBasic.incrementToken()) {
                term = charTermAttributeBasic.toString();
                tokensBasic.add(term);
            }
            readerBasic.close();
        }

        // Go through the text and collect the lemmatized/stemmed tokens.
        while (tokenStreamStem.incrementToken()) {
            term = charTermAttributeStem.toString();
            tokensStem.add(term);
        }
        readerStem.close();
        // NOTE(review): the token streams and analyzers are never closed.

        if (language.startsWith("EN-Stem") || language.startsWith("DE-Stem")) {
            // Build the term array for TreeTagger; terms without letters
            // (pure numbers/punctuation) are blanked out first.
            String[] basic = new String[tokensStem.size()];
            String[] stem = new String[tokensStem.size()];

            int swIndex = 0;
            for (int i = 0; i < basic.length; i++) {
                basic[i] = tokensStem.get(i);

                if (tokensStem.size() > swIndex) {
                    // NOTE(review): i and swIndex advance in lock-step, so
                    // this comparison is always true; kept for safety.
                    if (tokensStem.get(i).equalsIgnoreCase(tokensStem.get(swIndex))) {
                        // Blank out terms that contain no letters.
                        if (!tokensStem.get(swIndex).matches("[a-zA-ZæøåÆØÅäÄéöÖüÜß]+.|[a-zA-ZæøåÆØÅäÄéöÖüÜß]+")) {
                            stem[i] = "";
                        } else {
                            stem[i] = tokensStem.get(swIndex);
                        }
                        swIndex++;
                    } else {
                        stem[i] = "";
                    }
                } else {
                    stem[i] = "";
                }
            }

            // TreeTagger lemmatizes the terms; the handler installed in the
            // constructor appends every lemma to wordTableNew.
            tt.process(stem);

        // Norwegian stemming (Lucene only, no TreeTagger)
        } else if (language.startsWith("NO-Stem")) {

            String[] basic = new String[tokensStem.size()];
            String[] stem = new String[tokensStem.size()];

            int swIndex = 0;
            for (int i = 0; i < basic.length; i++) {
                // NOTE(review): tokensBasic may be shorter than tokensStem
                // (different analyzers), which would throw
                // IndexOutOfBoundsException here — verify against real input.
                basic[i] = tokensBasic.get(i);
                stem[i] = tokensStem.get(i);

                if (tokensStem.size() > swIndex) {
                    // NOTE(review): always true (i == swIndex); kept as-is.
                    if (tokensStem.get(i).equalsIgnoreCase(tokensStem.get(swIndex))) {
                        // Blank out terms without (Norwegian) letters.
                        if (!tokensStem.get(swIndex).matches("[a-zA-ZæøåÆØÅ]+.|[a-zA-ZæøåÆØÅ]+")) {
                            stem[i] = "";
                        } else {
                            stem[i] = tokensStem.get(swIndex);
                        }
                        swIndex++;
                    } else {
                        stem[i] = "";
                    }
                } else {
                    stem[i] = "";
                }
            }

            for (int i = 0; i < basic.length; i++) {
                wordTableNew.add(stem[i]);
            }

        // Plain "NO": return the unstemmed tokens, without numbers.
        } else if (language.equalsIgnoreCase("NO")) {

            String[] basic = new String[tokensBasic.size()];

            for (int i = 0; i < basic.length; i++) {
                basic[i] = tokensBasic.get(i);

                // Blank out terms that contain no letters.
                if (!basic[i].matches("[a-zA-ZæøåÆØÅ]+.|[a-zA-ZæøåÆØÅ]+")) {
                    basic[i] = "";
                }
                wordTableNew.add(basic[i]);
            }
        }

        return wordTableNew;
    }

    /**
     * Convenience overload: joins the word list into a whitespace-separated
     * sentence and delegates to {@link #stemSentence(String, String)}.
     *
     * @param sentAsList sentence as a list of words
     * @param stemExclusionFilename file with words that must not be stemmed
     * @return the processed token list, or {@code null} for an unknown language
     * @throws FileNotFoundException if {@code empty.txt} is missing
     * @throws IOException on tokenization errors
     * @throws TreeTaggerException on TreeTagger failures
     */
    public ArrayList<String> stemSentence(ArrayList<String> sentAsList, String stemExclusionFilename) throws FileNotFoundException, IOException, TreeTaggerException {
        // StringBuilder instead of repeated String concatenation in a loop.
        StringBuilder sentence = new StringBuilder();
        for (String word : sentAsList) {
            sentence.append(word).append(' ');
        }
        return stemSentence(sentence.toString(), stemExclusionFilename);
    }

    /**
     * Removes stopwords from a sentence using the default Lucene stop set for
     * the configured language.
     *
     * @param sent sentence as a string (lower-cased internally)
     * @return the remaining tokens, or {@code null} for an unknown language
     * @throws IOException on tokenization errors
     */
    @SuppressWarnings({ "resource", "deprecation" })
    private ArrayList<String> stopwordRemoval (String sent) throws IOException {

        sent = sent.toLowerCase();

        Analyzer analyzer = null;

        if (language.startsWith("EN-")) {
            analyzer = new StandardAnalyzer(Version.LUCENE_36); // Uses English stop set
        } else if (language.startsWith("DE-")) {
            analyzer = new StandardAnalyzer(Version.LUCENE_36, GermanAnalyzer.getDefaultStopSet());
        } else if (language.startsWith("NO-")) {
            analyzer = new StandardAnalyzer(Version.LUCENE_36, NorwegianAnalyzer.getDefaultStopSet());
        } else {
            return null;
        }

        StringReader reader = new StringReader(sent);

        TokenStream ts = analyzer.tokenStream("", reader);
        CharTermAttribute cta = ts.addAttribute(CharTermAttribute.class);

        ArrayList<String> swRemSentenceAsList = new ArrayList<String>();

        // Go through the text; the analyzer drops the stopwords.
        while (ts.incrementToken()) {
            swRemSentenceAsList.add(cta.toString());
        }

        reader.close();
        // NOTE(review): the token stream and analyzer are never closed.

        return swRemSentenceAsList;
    }

    /**
     * Convenience overload: joins the word list into a whitespace-separated
     * sentence and delegates to {@link #stopwordRemoval(String)}.
     *
     * @param sentAsList sentence as a list of words
     * @return the remaining tokens, or {@code null} for an unknown language
     * @throws IOException on tokenization errors
     */
    private ArrayList<String> stopwordRemoval (ArrayList<String> sentAsList) throws IOException {
        // StringBuilder instead of repeated String concatenation in a loop.
        StringBuilder sent = new StringBuilder();
        for (String word : sentAsList) {
            sent.append(word).append(' ');
        }
        return stopwordRemoval(sent.toString());
    }

    /**
     * Full pipeline: for Norwegian, stopword removal happens before stemming;
     * for English and German, stemming happens before stopword removal.
     *
     * @param sentence the sentence to process (lower-cased internally)
     * @param stemExclusionFilename file with words that must not be stemmed
     * @return the processed token list, or {@code null} if no branch applies
     * @throws FileNotFoundException if {@code empty.txt} is missing
     * @throws IOException on tokenization errors
     * @throws TreeTaggerException on TreeTagger failures
     */
    public ArrayList<String> processSentence (String sentence, String stemExclusionFilename) throws FileNotFoundException, IOException, TreeTaggerException {

        sentence = sentence.toLowerCase();

        ArrayList<String> sentList = null;

        if (language.startsWith("NO")) {
            if (language.endsWith("StemSWRem")) {
                sentList = stopwordRemoval(sentence);

            // NOTE(review): intentionally NOT "else if" — for "NO-StemSWRem"
            // both stopword removal (above) and stemming (below) must run.
            } if (language.contains("-Stem")) {
                if (sentList == null) {
                    sentList = stemSentence(sentence, stemExclusionFilename);
                } else {
                    sentList = stemSentence(sentList, stemExclusionFilename);
                }

            } else {
                // Plain "NO": stemSentence() only tokenizes and drops numbers.
                sentList = stemSentence(sentence, stemExclusionFilename);
            }

        } else if (language.startsWith("EN-") || language.startsWith("DE-")) {
            if (language.contains("-Stem")) {
                sentList = stemSentence(sentence, stemExclusionFilename);
            }

            if (language.endsWith("StemSWRem")) {
                sentList = stopwordRemoval(sentList);
            }
        }

        return sentList;
    }

    /**
     * Main, for manual testing: processes one Norwegian sentence and prints
     * the resulting tokens.
     */
    public static void main(String[] args) throws FileNotFoundException, IOException, TreeTaggerException {

        SentenceTokenizer st = new SentenceTokenizer("NO");

        ArrayList<String> wordpairsNew = st.processSentence("Dette er veldig moro, veldig!", "prepVocabulary.txt");

        System.out.println("\n\nPrep:");
        for (String word : wordpairsNew) {
            System.out.println(word);
        }

    }
}
