package de.recipeminer.mining;

import com.aliasi.spell.TfIdfDistance;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;
import de.recipeminer.environments.ExecutionContextProvider;
import de.recipeminer.environments.FileFinder;
import de.recipeminer.persistence.Db4oCompanionFactory;
import de.recipeminer.persistence.Db4oCompanionProvider;
import de.recipeminer.persistence.Db4oHelpers;
import de.recipeminer.persistence.Db4oObjectCompanion;
import de.recipeminer.tools.Config;
import org.apache.log4j.Logger;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.*;
import java.util.Map.Entry;



/**
 * Helper for LingPipes TfIdfDisctance.
 *
 * @author Georg Mühlenberg
 */
public class TermFrequencyImpl implements Db4oCompanionProvider<TermFrequencyImpl> {
    private static final Logger logger = Logger.getLogger(TermFrequencyImpl.class);
    // unique identifier used to locate the persisted singleton instance in the db4o database
    private String db4oIdent = TermFrequencyImpl.class.getCanonicalName();
    // cached singleton instance; lazily resolved (possibly from the database) in getInstance()
    private static TermFrequencyImpl instance = null;

    // NOTE(review): never assigned in this file -- presumably retained for db4o serialisation; verify
    private TokenizerFactory tokenizer;
    protected Set<String> actualStopwordList; //actual set of stopwords. reset if new instance is created.
    // location of the stopword file, assigned by createNewStopWordSetFromFile()
    private File stopwordfile;
    // term -> total occurrence count accumulated over all trained documents
    protected static TreeMap<String,Integer> globalTermFrequencies = new TreeMap<String,Integer>();



    // LingPipe distance that accumulates the TF/IDF statistics
    private TfIdfDistance distance;

    //a set of keys of texts already trained (trainWith stores String hash codes;
    //original note mentioned recipe keys, e.g. chefkoch UUIDs) -- guards against double training
    Set<String> already_trained_texts = new HashSet<String>();

    /**
     * Supplies the canonical name of {@code TermFrequencyImpl} as the unique
     * identifier String for this singleton in the database.
     * (see {@link de.recipeminer.persistence.Db4oCompanionProvider#db4oIdentifier})
     *
     * @return name of this class
     */
    public String db4oIdentifier() {
        return this.db4oIdent;
    }

    /**
     * {@inheritDoc}
     * <p>Names the field that stores the database identifier, so the
     * persistence layer can query on it.</p>
     */
    @Override
    public String db4oIdentifierFieldName() {
        return "db4oIdent";
    }

    /**
     * {@inheritDoc}
     * <p>The identifier is a constant (the canonical class name), so there is
     * intentionally nothing to update here.</p>
     */
    @Override
    public void updateIdentifier() {
        // intentionally empty: db4oIdent never changes after construction
    }

    /**
     * Constructs a new TermFrequencyImpl and resets both the TfIdfDistance
     * statistics and the stopword set.
     * Because the instance is persisted in the object database, this
     * constructor is expected to run only once per program lifecycle.
     */
    private TermFrequencyImpl() {
        // fresh TF/IDF distance plus an empty global term frequency map
        initializeNewDistance();
        logger.info("Reset Term statistics...");

        // start over with an empty stopword set
        clearStopWordList();
        logger.info("Reset stopword recipeExpamples...");
    }

    /**
     * Singleton accessor backed by the db4o object database.
     * Returns the persisted instance when one exists, otherwise creates a new one.
     * <p>
     * NOTE(review): not synchronized -- concurrent first calls could race;
     * confirm the application only calls this from a single thread.
     */
    public static TermFrequencyImpl getInstance() {
        // Re-resolve when no instance is cached yet, or when the database reports
        // similar persisted objects -- presumably so a persisted copy takes
        // precedence over the in-memory one; TODO confirm against Db4oHelpers semantics.
        if (instance == null || instance.getDb4oCompanion().similarObjectsPresentInDatabase()) {
            TermFrequencyImpl persistedInstance =
                    Db4oHelpers.getPersistedSingletonInstance(TermFrequencyImpl.class);
            if (persistedInstance != null)
                logger.trace("Returning persited instance from " + Db4oHelpers.currentDatabase().getDatabaseFile());
            else
                logger.trace("Creating and returning new instance.");
            // fall back to a fresh instance when nothing was persisted yet
            instance = persistedInstance != null ? persistedInstance : new TermFrequencyImpl();
        }
        return instance;
    }

    /**
     * Exposes the shared term-count map.
     *
     * @return the live (mutable) mapping of term -> total occurrence count
     */
    public static TreeMap<String,Integer> globalTermFrequencies() {
        return globalTermFrequencies;
    }
    /**
     * Tries locating the stopword file via the current execution context.
     *
     * @return True if found, False if no file found.
     */
    public boolean createNewStopWordSetFromFile() {
        try {
            stopwordfile = ExecutionContextProvider.currentResourceFinder().getFile(Config.STOPWORD_FILE_LOCATION);
        } catch (RuntimeException e) {
            /*
             * getFile() throws a RuntimeException (UnableToLocateException) when the file
             * cannot be located. Log the configured location instead of dereferencing the
             * stopwordfile field: that field is still null on the first failed lookup,
             * so the previous message ("stopwordfile.getAbsoluteFile()") threw an NPE here.
             */
            logger.info("No Stopwordlist found at " + Config.STOPWORD_FILE_LOCATION + "\tCreating a new one.");

            return false;
        }
        return true;
    }

    /**
     * Writes a stopword file to the standard location ( stopwords.txt ),
     * including the standard amount of 50 new stopword candidates.
     *
     * @return the written StopWordFile
     */
    public StopWordFile writeStopWordFile() {
        TreeSet<String> candidates =
                termFrequencyDictionary().getStopWordCandidates(Config.NEW_STOPWORD_STD_AMOUNT);
        return writeStopWordFile(Config.STOPWORD_FILE_LOCATION, candidates);
    }

    /**
     * Extracts the stopwords from the standard location, replacing the current
     * stopword set with the freshly extracted one.
     *
     * @return a TreeSet of the new stopwords
     */
    public TreeSet<String> updateStopWordSetFromFile() {
        return updateStopWordSetFromFile(Config.STOPWORD_FILE_LOCATION);
    }

    /**
     * Extracts the stopwords from the designated location.
     * The current stopword set is cleared and replaced by the extracted set.
     *
     * @param childpath The name of the stopword file
     * @return A TreeSet of the new stopwords
     */
    public TreeSet<String> updateStopWordSetFromFile(String childpath) {
        StopWordFile source = new StopWordFile(FileFinder.getProjectParentPath(), childpath);
        TreeSet<String> freshStopWords = StopWordFile.extractStopWordsFromFile(source);
        // overwrite the active stopword set with the file's contents
        actualStopwordList.clear();
        actualStopwordList.addAll(freshStopWords);
        return freshStopWords;
    }

    /**
     * @return the number of stopwords currently held in the active stopword set
     */
    public int getSizeOfActualStopwordList() {
        return this.actualStopwordList.size();
    }

    /**
     * Creates stopwordset and writes file to specified filename.
     *
     * @param childpath          filename like "example.txt"
     * @param stopwordcandidates you can compute these by invoking termFrequencyDictionary().
     * @return the StopWordFile that was written (returned even if writing failed; the
     *         failure is logged)
     */
    public StopWordFile writeStopWordFile(String childpath, TreeSet<String> stopwordcandidates) {

        String parentPath = FileFinder.getProjectParentPath();
        //move up in directory
        StopWordFile stopwordfile = new StopWordFile(parentPath, childpath);

        //remove all not-new stopword candidates.
        stopwordcandidates.removeAll(actualStopwordList);

        //String composition delegated to Class StopWordFile
        String stringToWrite = StopWordFile.compositeStopwordFileText(actualStopwordList, stopwordcandidates);

        //Get write access to file
        try {
            // createNewFile() returns true when the file did NOT exist and was created,
            // so we are overwriting exactly when it returns false (the original check
            // was inverted). Also use File.separator -- pathSeparator is ':'/';'.
            boolean created = stopwordfile.createNewFile();
            if (!created)
                logger.warn("Overwriting existing file " + parentPath + File.separator + childpath);
            BufferedWriter out = new BufferedWriter(new FileWriter(stopwordfile));
            try {
                out.write(stringToWrite);
            } finally {
                // close in finally so the writer is released even when write() fails
                out.close();
            }
        } catch (IOException ex) {
            // log with the exception attached; no separate printStackTrace needed
            logger.error("Sorry, could not write stopword file " + parentPath + File.separator + childpath, ex);
        }
        return stopwordfile;
    }

    /**
     * {@inheritDoc}
     * <p>Delegates to the companion factory singleton.</p>
     */
    @Override
    public Db4oObjectCompanion getDb4oCompanion() {
        Db4oCompanionFactory factory = Db4oCompanionFactory.getInstance();
        return factory.companionFor(this);
    }


    public boolean validate() {
        return true;
    }

    /**
     * Builds a dictionary of document frequencies and IDF values for every
     * term currently known to the wrapped TfIdfDistance.
     *
     * @return a freshly built TermFrequencyDictionary
     */
    public TermFrequencyDictionary termFrequencyDictionary() {
        // the distance knows the full set of trained terms
        return new TermFrequencyDictionary(distance.termSet());
    }

    /**
     * Creates a whole new TF/IDF-Distance. Resets global term frequency list.
     */
    public void initializeNewDistance() {
        TokenizerFactory tokenizerFactory = IndoEuropeanTokenizerFactory.INSTANCE;
        this.distance = new TfIdfDistance(tokenizerFactory);
        // globalTermFrequencies is static, so reference it statically instead of via
        // 'this.' (misleading idiom). A fresh map is assigned -- not clear()ed -- which
        // preserves the original behavior for callers holding the previous map reference.
        globalTermFrequencies = new TreeMap<String,Integer>();
    }

    /**
     * Gives the distance pointer for serialisation purposes.
     *
     * @return the wrapped TfIdfDistance
     */
    protected TfIdfDistance getDistance() {
        return this.distance;
    }

    /**
     * Trains the TF/IDF distance with the given text, guarding against double
     * training of the same text.
     *
     * @param trainingstring text to train.
     */
    public void trainWith(String trainingstring) {
        // key the text by its hash code (avoids deprecated new Integer(...))
        String key = String.valueOf(trainingstring.hashCode());

        if (already_trained_texts.contains(key)) {
            // guard the preview length: substring(0, 20) would throw
            // StringIndexOutOfBoundsException for texts shorter than 20 chars
            String preview = trainingstring.length() > 20
                    ? trainingstring.substring(0, 20) : trainingstring;
            logger.info("already trained text \"" + preview + "...\"");
        } else {
            actualizeGlobalTermFrequencies(trainingstring);
            already_trained_texts.add(key);
            distance.handle(trainingstring);
        }
    }
    /**
     * Splits the training document into terms by the lingpipe Tokenizer and
     * counts each term in the global term frequency map.
     *
     * @param wholetrainingdocument the complete text of one training document
     */
    private void actualizeGlobalTermFrequencies(String wholetrainingdocument) {
        Tokenizer tokenizer = IndoEuropeanTokenizerFactory.INSTANCE.tokenizer(
                wholetrainingdocument.toCharArray(), 0, wholetrainingdocument.length());
        for (String term : tokenizer.tokenize()) {
            // single map lookup instead of containsKey() followed by get()
            Integer count = globalTermFrequencies.get(term);
            globalTermFrequencies.put(term, count == null ? 1 : count + 1);
        }
    }

    /**
     * Discards the current stopword set and starts over with an empty one.
     */
    public void clearStopWordList() {
        this.actualStopwordList = new HashSet<String>();
    }

    public class TermFrequencyDictionary {
        // term -> document frequency (number of training documents containing the term)
        TreeMap<String, Double> docfreqmap = new TreeMap<String, Double>();
        // term -> inverse document frequency as reported by TfIdfDistance
        TreeMap<String, Double> idfmap = new TreeMap<String, Double>();

        /**
         * Builds a dictionary directly from precomputed mappings.
         *
         * @param tm1 term -> document frequency mapping
         * @param tm2 term -> inverse document frequency mapping
         */
        private TermFrequencyDictionary(TreeMap<String, Double> tm1, TreeMap<String, Double> tm2) {
            this.docfreqmap = tm1;
            this.idfmap = tm2;
        }

        /**
         * Construct TermFrequencyDictionary from nothing more than a termset.
         *
         * @param termset all terms known to the enclosing TfIdfDistance
         */
        private TermFrequencyDictionary(Set<String> termset) {
            for (String s : termset) {
                // Double.valueOf instead of the deprecated new Double(...) constructor
                docfreqmap.put(s, Double.valueOf(distance.docFrequency(s)));
                idfmap.put(s, distance.idf(s));
            }
        }

        /**
         * @return the term -> document frequency mapping backing this dictionary
         */
        public TreeMap<String, Double> documentFrequencyMapping() {
            return this.docfreqmap;
        }

        /**
         * Compute the most frequent terms known to this distance as stopword candidates.
         *
         * @param abs absolute number of stopword candidates to export
         * @return SortedSet of stop word candidates. Size is maximum {@code abs}.
         * @author Georg Mühlenberg
         * @see TermFrequencyDictionary#getStopWordCandidates(double relative)
         */
        public TreeSet<String> getStopWordCandidates(int abs) {
            /*
             * Walk the frequency classes from the highest document count down and fill
             * the candidate set. The classes are computed exactly once (the original
             * computed them twice and discarded the first result), and iteration stops
             * as soon as the candidate set is full.
             */
            TreeSet<String> stopwordcandidates = new TreeSet<String>();
            LinkedList<TreeSet<String>> termCountClasses = computeClassesOfTotalTermFrequencies();
            for (Iterator<TreeSet<String>> iterator = termCountClasses.descendingIterator(); iterator.hasNext(); ) {
                TreeSet<String> currentTreeSet = iterator.next();
                // null check must come BEFORE any dereference (the original checked after isEmpty())
                if (currentTreeSet == null || currentTreeSet.isEmpty())
                    continue;
                for (String currentTerm : currentTreeSet) {
                    if (stopwordcandidates.size() >= abs)
                        return stopwordcandidates;
                    stopwordcandidates.add(currentTerm);
                }
            }
            return stopwordcandidates;
        }

        /**
         * <b>Important data structure for sorting of terms by their frequencies.</b><p>
         * In the returned list, index {@code x} holds a {@linkplain TreeSet} of all terms
         * with a document occurrence count of exactly {@code x};
         * index 0 therefore always holds an empty set.
         * Optical representation:
         * <p>
         * {@code [0] { } }<p>
         * {@code [1] { all terms that were seen once in document training } }<p>
         * {@code [2] { all terms that were seen twice in document training } }<p>
         * {@code ... }
         *
         * @return list of term sets, indexed by document frequency
         */
        public LinkedList<TreeSet<String>> computeClassesOfTotalTermFrequencies() {
            LinkedList<TreeSet<String>> docFrequencyClasses = new LinkedList<TreeSet<String>>();

            /*
             * Distribute every known term into the set matching its document frequency.
             * The list is grown on demand with empty sets -- replacing the original
             * approach of catching IndexOutOfBoundsException as control flow, which
             * also left a stray trailing empty set via add(index, element) insertion.
             * (An unused 'valueset' of all frequency values was removed as dead code.)
             */
            for (Entry<String, Double> currentEntry : docfreqmap.entrySet()) {
                // document frequency of this term, example: (backen, 4.0) -> 4
                int freqCount = currentEntry.getValue().intValue();
                while (docFrequencyClasses.size() <= freqCount) {
                    docFrequencyClasses.addLast(new TreeSet<String>());
                }
                docFrequencyClasses.get(freqCount).add(currentEntry.getKey());
            }
            return docFrequencyClasses;
        }

        /**
         * Provides a relative amount of the known terms, sorted descending, as new stopword candidates.
         *
         * @param relative The factor to determine how much candidates will be returned of all known terms.
         * @return SortedSet of stop word candidates. Size is maximum {@code size * relative}.
         * @author Georg Mühlenberg
         * @see TermFrequencyDictionary#getStopWordCandidates(int)
         */
        public SortedSet<String> getStopWordCandidates(double relative) {
            /*
             * The cast must wrap the whole product. The original code cast only
             * size(), so (int * double) stayed a double, this overload called itself
             * recursively, and the method died with a StackOverflowError.
             */
            return getStopWordCandidates((int) (documentFrequencyMapping().size() * relative));
        }


        /**
         * @return the term -> inverse document frequency mapping backing this dictionary
         */
        public TreeMap<String, Double> inverseDocFreqMapping() {
            return this.idfmap;
        }

        /**
         * Pretty-prints a TermFrequencyDictionary and returns the suiting String.
         *
         * @return a table with one line per term: term, document frequency, IDF
         */
        @Override
        public String toString() {
            String formatstring = "%18s  %9d  %7.2f\n";
            // local, single-threaded buffer: StringBuilder instead of StringBuffer
            StringBuilder resultstring = new StringBuilder();
            resultstring.append(String.format("%18s  %9s  %4s\n", "Term", "Doc Freq", "IDF"));

            //Compute Union of the two key sets; a term may exist in only one of the maps.
            Set<String> termList = new TreeSet<String>();
            termList.addAll(docfreqmap.keySet());
            termList.addAll(idfmap.keySet());
            for (String s : termList) {
                // guard against terms missing from one map (the union makes this possible);
                // the original dereferenced docfreqmap.get(s) unguarded and could NPE
                Double docFreq = docfreqmap.get(s);
                Double idf = idfmap.get(s);
                resultstring.append(String.format(formatstring,
                        s,
                        Integer.valueOf(docFreq == null ? 0 : docFreq.intValue()),
                        idf == null ? Double.valueOf(0.0) : idf));
            }
            return resultstring.toString();
        }

/*public TermFrequencyDictionary fromString(String s)
    {

}*/

    }
}
