/*
 * Saphre - Suffix Arrays for Phrase Extraction
 * Copyright (C) 2013 
 * Dale Gerdemann - Tübingen, Germany 
 * Niko Schenk - Frankfurt am Main, Germany
 * All rights reserved.
 *
 * This program is free software: you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package util.statistics;

import java.io.PrintWriter;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map;
import java.util.Set;
import util.Interval;
import saphre.core.Store;
import saphre.core.SuffixArray;
import util.sorting.Multiset;
import util.IntString;

/**
 * Class which implements various statistical measures over n-grams.
 * TODO: Needs some more method descriptions.
 * 
 * @author Niko Schenk
 */
public class StatisticsHandler {

    // Total number of documents in the corpus; denominator for all df-based measures.
    private double D;

    /**
     * Sets up a StatisticsHandler.
     *
     * @param aD the number of documents in the corpus.
     */
    public StatisticsHandler(double aD) {
        this.D = aD;
    }

    /*
     * Logarithm base 2, with the convention log2(0) = 0 so that p * log2(p)
     * terms in entropy-style sums vanish for p == 0.
     */
    private static double log2(double x) {
        if (x == 0.0) {
            return 0.0;
        }
        return Math.log(x) / Math.log(2);
    }

    /**
     * Pointwise mutual information from precomputed frequencies:
     * log2(O / E) with the expectation E = f1 * f2 / N.
     *
     * @param f1 term frequency of the first word.
     * @param f2 term frequency of the second word.
     * @param O  observed term frequency of the bigram.
     * @param sa suffix array; only its text length {@code sa.N} is used here.
     * @return the pointwise mutual information in bits.
     */
    public static double mi(int f1, int f2, int O, SuffixArray sa) {
        // Widen before multiplying: f1 * f2 in plain int arithmetic can
        // overflow for frequent words in large corpora.
        double E = (double) f1 * f2 / sa.N;
        return log2(O / E);
    }

    /**
     * Pointwise mutual information of the bigram (w1, w2), looking up all
     * required frequencies in the suffix array.
     *
     * @param w1 integer id of the first word.
     * @param w2 integer id of the second word.
     * @param sa suffix array over the corpus.
     * @return the pointwise mutual information in bits.
     */
    public static double mi(int w1, int w2, SuffixArray sa) {
        int[] unipat = new int[1];
        unipat[0] = w1;
        int f1 = sa.search(unipat, sa.top).tf();
        unipat[0] = w2;
        int f2 = sa.search(unipat, sa.top).tf();
        int O = sa.search(new int[]{w1, w2}, sa.top).tf();
        // Delegate so the PMI formula lives in exactly one place.
        return mi(f1, f2, O, sa);
    }

    /**
     * Mutual information of a gappy phrase "w1 GAP w2".
     *
     * @param tf cumulative term frequency of the gappy phrase, i.e. the
     *           summed tf of all fillers.
     * @param w1 the left context.
     * @param w2 the right context.
     * @param sa suffix array over the corpus.
     * @return the pointwise mutual information in bits.
     */
    public static double mi(int tf, int[] w1, int[] w2, SuffixArray sa) {
        int f1 = sa.search(w1, sa.top).tf();
        int f2 = sa.search(w2, sa.top).tf();
        // Delegate so the PMI formula lives in exactly one place.
        return mi(f1, f2, tf, sa);
    }

    /**
     * Residual IDF (Church &amp; Gale) using this handler's document count D:
     * the observed IDF minus the IDF predicted by a Poisson model.
     *
     * @param tf term frequency of the term in the corpus.
     * @param df document frequency of the term; must be &gt; 0.
     * @return the residual IDF.
     */
    public double getRidf(int tf, int df) {
        // Delegate to the static implementation to avoid a duplicated formula.
        return ridf(tf, df, D);
    }

    /**
     * Residual IDF (Church &amp; Gale): -log(df/D) + log(1 - e^(-tf/D)).
     *
     * @param tf term frequency of the term in the corpus; must be &gt; 0.
     * @param df document frequency of the term; must be &gt; 0.
     * @param D  total number of documents in the corpus.
     * @return the residual IDF.
     */
    public static double ridf(int tf, int df, double D) {
        double observed = -Math.log(df / D);
        // Poisson prediction of the document frequency given tf.
        double predicted = Math.log(1 - Math.exp(-tf / D));
        return observed + predicted;
    }

    /**
     * Residual IDF where the document frequency is taken from a document
     * distribution (one entry per document containing the term).
     *
     * @param tf      term frequency of the term in the corpus.
     * @param docDist document distribution; its size is the document frequency.
     * @param D       total number of documents in the corpus.
     * @return the residual IDF.
     */
    public static double ridf(int tf, Multiset docDist, double D) {
        // Delegate to the df-based overload to avoid a duplicated formula.
        return ridf(tf, docDist.size(), D);
    }

    /**
     * Average Yamamoto/Church MI over all expansions of a gappy phrase, i.e.
     * over every concrete n-gram "leftPart + filler + rightPart".
     *
     * @param leftPart  the left context of the gap.
     * @param bridge    map from fillers to their occurrence sets; only the
     *                  key set (the fillers) is used here.
     * @param rightPart the right context of the gap.
     * @param sa        suffix array over the corpus.
     * @return the mean MI; NaN if {@code bridge} is empty (0/0), as before.
     */
    public static double computeAverageMiForGappyComponents(int[] leftPart, Map<IntString, Set<Integer>> bridge, int[] rightPart, SuffixArray sa) {
        double cumMi = 0.0;
        ArrayList<int[]> gappyComponents = getGappyComponents(leftPart, bridge, rightPart);
        for (int[] gappyComponent : gappyComponents) {
            cumMi += miYamamotoChurch(gappyComponent, sa);
        }
        return cumMi / gappyComponents.size();
    }

    /**
     * Computes, over all fillers of a gappy phrase, the summed term frequency
     * and the average residual IDF. Filler statistics are memoized in
     * {@code ngramMap} so each distinct filler hits the suffix array only once.
     *
     * @param bridge   map from fillers to their occurrence sets.
     * @param sa       suffix array over the corpus.
     * @param store    token store used to render fillers as strings.
     * @param D        total number of documents in the corpus.
     * @param ngramMap cache from filler string to its (tf, df) statistics;
     *                 updated in place for fillers not yet present.
     * @return array of length 3: [0] = summed tf of all fillers,
     *         [1] = average filler ridf, [2] = unused (always 0.0; the slot
     *         is kept so callers expecting a 3-element array still work).
     */
    public static double[] computeSumTFAndAverageRidfOfFillers(Map<IntString, Set<Integer>> bridge, SuffixArray sa,
            Store store, double D, Map<String, Ngram> ngramMap) {

        double[] rval = new double[3];
        double cumRidf = 0.0;
        double sumTfOfFillers = 0.0;
        for (IntString f : bridge.keySet()) {
            int[] words = f.toArray();
            String ngramString = store.toString(words);
            Ngram ngramObject = ngramMap.get(ngramString);
            int tf;
            int df;
            if (ngramObject != null) {
                // Cache hit: reuse the previously computed statistics.
                tf = ngramObject.getTf();
                df = ngramObject.getDf();
            } else {
                // Cache miss: query the suffix array and memoize the result.
                tf = sa.search(words).tf();
                df = sa.search(words).df(sa, store);
                Ngram ngram = new Ngram();
                ngram.setTf(tf);
                ngram.setDf(df);
                ngramMap.put(ngramString, ngram);
            }
            sumTfOfFillers += tf;
            cumRidf += ridf(tf, df, D);
        }
        rval[0] = sumTfOfFillers;          // sum tf of fillers.
        rval[1] = cumRidf / bridge.size(); // average ridf of fillers.
        return rval;
    }

    /**
     * Concatenates any number of int arrays into a newly allocated array.
     * Inspiration taken from:
     * http://stackoverflow.com/questions/80476/how-to-concatenate-two-arrays-in-java
     *
     * @param first the first array.
     * @param rest  further arrays, appended in order.
     * @return a new array containing all elements of all inputs, in order.
     */
    public static int[] concatAll(int[] first, int[]... rest) {
        int totalLength = first.length;
        for (int[] array : rest) {
            totalLength += array.length;
        }
        int[] result = Arrays.copyOf(first, totalLength);
        int offset = first.length;
        for (int[] array : rest) {
            System.arraycopy(array, 0, result, offset, array.length);
            offset += array.length;
        }
        return result;
    }

    /**
     * Expands a gappy phrase into the list of concrete n-grams
     * "leftPart + filler + rightPart", one per filler in {@code bridge}.
     */
    private static ArrayList<int[]> getGappyComponents(int[] leftPart, Map<IntString, Set<Integer>> bridge, int[] rightPart) {
        ArrayList<int[]> rval = new ArrayList<int[]>(bridge.size());
        for (IntString aFiller : bridge.keySet()) {
            rval.add(concatAll(leftPart, aFiller.toArray(), rightPart));
        }
        return rval;
    }

    /**
     * Computation of mutual information according to Yamamoto &amp; Church.
     * For an n-gram "x Y z" (first word x, last word z, middle part Y) this is
     * ln( f(xYz) * f(Y) / (f(xY) * f(Yz)) ); for bigrams the middle part is
     * empty and f(Y) degenerates to N. Note: this uses the NATURAL log,
     * unlike the base-2 {@link #mi(int, int, int, SuffixArray)} variants.
     *
     * @param words the n-gram as word ids.
     * @param sa    suffix array over the corpus.
     * @return the MI; -100.0 as a sentinel for unigrams.
     */
    public static double miYamamotoChurch(int[] words, SuffixArray sa) {
        // Unigrams have no decomposition; return a sentinel score.
        if (words.length == 1) {
            return -100.0;
        }
        // Bigram case: ln( f(xz) * N / (f(x) * f(z)) ).
        if (words.length == 2) {
            // Widen before multiplying to avoid int overflow.
            double numerator = (double) sa.search(words, sa.top).tf() * sa.N;
            double denominator = (double) sa.search(new int[]{words[0]}, sa.top).tf()
                    * sa.search(new int[]{words[1]}, sa.top).tf();
            return Math.log(numerator / denominator);
        }
        // General n-gram "x Y z".
        int[] middlepart = Arrays.copyOfRange(words, 1, words.length - 1);  // Y
        int[] leftdenom = Arrays.copyOfRange(words, 0, words.length - 1);   // xY
        int[] rightdenom = Arrays.copyOfRange(words, 1, words.length);      // Yz

        double numerator = (double) sa.search(words, sa.top).tf()
                * sa.search(middlepart, sa.top).tf();
        double denominator = (double) sa.search(leftdenom, sa.top).tf()
                * sa.search(rightdenom, sa.top).tf();
        return Math.log(numerator / denominator);
    }

    /**
     * Pointwise Mutual Information generalized to n variables:
     * log2( O / E ) with E = (f1 * f2 * ... * fn) / N.
     *
     * <p>NOTE(review): for a length-normalized expectation one would divide
     * by N^(n-1) rather than N once (the original carried the same open
     * question in a comment) — confirm the intended model before changing.
     *
     * @param words the n-gram as word ids.
     * @param sa    suffix array over the corpus.
     * @return the generalized PMI in bits.
     */
    public static double miGeneralNgram(int[] words, SuffixArray sa) {
        // Product of the unigram frequencies; BigInteger because the product
        // of n int frequencies can overflow even a long.
        BigInteger product = BigInteger.ONE;
        int[] unipat = new int[1];
        for (int i = 0; i < words.length; i++) {
            unipat[0] = words[i];
            product = product.multiply(BigInteger.valueOf(sa.search(unipat, sa.top).tf()));
        }

        int O = sa.search(words, sa.top).tf();
        // doubleValue() keeps more precision than the original floatValue().
        double E = product.doubleValue() / sa.N;
        return log2(O / E);
    }

    /**
     * Computes p_ij, the probability of term i in document j, for every
     * document in the given document distribution.
     *
     * <p>NOTE(review): this parses {@code dd.toString()}, assumed to look
     * like "{(j,tf),(j,tf),...}" — a structured accessor on Multiset would
     * be more robust; verify the format against Multiset.toString().
     *
     * @param dd  document distribution of term i.
     * @param gfi global (corpus-wide) frequency of term i.
     * @return one double[4] per document: [0] = document number j,
     *         [1] = tf_ij (tf of i in j), [2] = p_ij = tf_ij / gfi,
     *         [3] = df_i (number of documents containing i).
     */
    public ArrayList<double[]> computePij(Multiset dd, int gfi) {
        String distributionString = dd.toString();

        ArrayList<double[]> rval = new ArrayList<double[]>(1000);

        // Strip the surrounding braces, then split "(1,2),(2,1)" into entries.
        String entries = distributionString.substring(1, distributionString.length() - 1);
        String[] js = entries.split("\\),\\(");

        for (String doc : js) {
            doc = doc.replace(")", "").replace("(", "");
            int comma = doc.indexOf(",");
            // Document number j.
            int j = Integer.parseInt(doc.substring(0, comma));
            // Term frequency of term i in document j.
            int tfi_j = Integer.parseInt(doc.substring(comma + 1));

            double[] a = new double[4];
            a[0] = j;
            a[1] = tfi_j;
            a[2] = (double) tfi_j / gfi;
            a[3] = (double) js.length;
            rval.add(a);
        }
        return rval;
    }

    /**
     * Inverse document frequency: log2(D / df) with this handler's D.
     *
     * @param df document frequency of the term; must be &gt; 0.
     * @return the idf weight.
     */
    public double idf(double df) {
        return log2(D / (df));
    }

    /**
     * Global log-entropy component ("entropy")
     * g_i = 1 + sum_j ( p_ij * log2(p_ij) / log2(D) ).
     *
     * @param triplets per-document statistics as produced by
     *                 {@link #computePij}; index [2] holds p_ij.
     * @param D        total number of documents in the corpus.
     * @return the global entropy weight.
     */
    public double entropy(ArrayList<double[]> triplets, double D) {
        double rval = 1.0;
        // log2(D) is loop-invariant; hoist it out of the sum.
        double denominator = log2(D);
        for (double[] triplet : triplets) {
            rval += (triplet[2] * log2(triplet[2])) / denominator;
        }
        return rval;
    }

    /**
     * Writes the log-entropy weight of a term for every document 1..D as a
     * comma-separated row to {@code pw}; documents not containing the term
     * get "0.0". NOTE(review): unlike {@link #tfIdf}, the trailing comma is
     * NOT stripped here — confirm downstream consumers expect that.
     *
     * @param tf    term frequency (currently unused; kept for interface
     *              compatibility).
     * @param dd    document distribution (currently unused; the statistics
     *              come from {@code p_ijs}).
     * @param p_ijs per-document statistics as produced by {@link #computePij}.
     * @param pw    sink for the output row.
     */
    public void logEntropy(int tf, Multiset dd, ArrayList<double[]> p_ijs, PrintWriter pw) {

        // Global component of the log-entropy weight.
        double global = entropy(p_ijs, D);

        // Walk the documents in order, padding gaps with 0.0.
        double docNum = 1.0;
        for (int t = 0; t < p_ijs.size(); t++) {
            double[] triplet = p_ijs.get(t);
            double j = triplet[0];
            // Emit 0.0 for every document before j that lacks the term.
            while (docNum < j) {
                pw.print("0.0,");
                docNum++;
            }
            // Local component: log2(tf_ij + 1).
            double local = log2(triplet[1] + 1);
            pw.print(String.format("%.6g", local * global) + ",");
            docNum++;
        }
        // Pad with 0.0 for all documents after the last occurrence.
        while (docNum <= D) {
            pw.print("0.0,");
            docNum++;
        }
    }

    /**
     * Writes the tf-idf weight of a term for every document 1..D as a
     * comma-separated row (no trailing comma) to {@code pw}; documents not
     * containing the term get "0.0".
     *
     * @param tf term frequency of the term in the corpus.
     * @param dd document distribution of the term.
     * @param pw sink for the output row.
     */
    public void tfIdf(int tf, Multiset dd, PrintWriter pw) {
        StringBuilder sb = new StringBuilder();
        ArrayList<double[]> p_ijs = computePij(dd, tf);

        // Global component: idf from the document frequency (same for all docs).
        double global = idf(p_ijs.get(0)[3]);

        // Walk the documents in order, padding gaps with 0.0.
        double docNum = 1.0;
        for (int t = 0; t < p_ijs.size(); t++) {
            double[] triplet = p_ijs.get(t);
            double j = triplet[0];
            while (docNum < j) {
                sb.append("0.0,");
                docNum++;
            }
            // Local component: raw tf of the term in document j.
            double local = triplet[1];
            sb.append(String.format("%.6g", local * global) + ",");
            docNum++;
        }
        // Pad with 0.0 for all documents after the last occurrence.
        while (docNum <= D) {
            sb.append("0.0,");
            docNum++;
        }
        // Remove the trailing comma.
        pw.print(sb.substring(0, sb.length() - 1));
    }

    /**
     * Writes the n-gram (quoted) followed by its tf-idf row to {@code pw}.
     * Several further measures (general PMI, Yamamoto/Church MI, ridf,
     * log-entropy) are computed here for potential re-enabling, but their
     * output columns are currently disabled.
     *
     * @param lcp          length of the n-gram (longest common prefix).
     * @param ngram        the n-gram rendered as a string.
     * @param inter        the n-gram's interval in the suffix array (unused here).
     * @param tf           term frequency of the n-gram.
     * @param df           document frequency of the n-gram.
     * @param dd           document distribution of the n-gram.
     * @param sa           suffix array over the corpus.
     * @param store        token store (unused here).
     * @param pw           sink for the output row.
     * @param generalngram the n-gram as word ids.
     */
    public void computeAndPrintStatistics(int lcp, String ngram, Interval inter, int tf, int df, Multiset dd, SuffixArray sa, Store store, PrintWriter pw, int[] generalngram) {

        pw.write("\"" + ngram + "\"," + "\t");
        pw.flush();

        // Computed for currently disabled output columns; kept so they can be
        // re-enabled as additional pw.write(...) calls.
        ArrayList<double[]> p_ijs = computePij(dd, tf);
        double mi = miGeneralNgram(generalngram, sa);
        double miYamamotoChurch = miYamamotoChurch(generalngram, sa);

        int numDocs = dd.size();
        System.out.println("You have " + numDocs + " num docs.");

        // NOTE(review): numDocs is the document frequency of THIS n-gram, not
        // the corpus document count D; passing it as D makes the observed
        // component -log(df/df) == 0 — confirm this is intended.
        double ridf = ridf(tf, df, numDocs);

        tfIdf(tf, dd, pw);
    }

    /**
     * Checks whether a maximal repeat is also supermaximal, i.e. whether its
     * occurrences have pairwise distinct left contexts AND pairwise distinct
     * right contexts across all suffixes in its interval.
     *
     * @param inter the interval of the repeat in the suffix array.
     * @param sa    suffix array over the corpus.
     * @param lcp   length of the repeat (shared prefix of the suffixes).
     * @return true iff the repeat is supermaximal.
     */
    public static boolean isSupermaximal(Interval inter, SuffixArray sa, int lcp) {

        int[] sequence = sa.getText();
        int[] suffixTable = sa.getSuftab();

        int left = inter.lb();
        int right = inter.rb();

        // The equality tests below are symmetric in (i, j), so checking each
        // unordered pair once suffices; the original scanned all ordered pairs.
        for (int i = left; i <= right; i++) {
            for (int j = i + 1; j <= right; j++) {
                int iLoc = suffixTable[i];
                int jLoc = suffixTable[j];
                // Compare the characters just BEFORE the two suffix starts
                // (the Burrows-Wheeler column). Assumes iLoc/jLoc > 0 —
                // presumably guaranteed by a leading sentinel; TODO confirm.
                int iSym = sequence[iLoc - 1];
                int jSym = sequence[jLoc - 1];
                if (iSym == jSym) {
                    // Shared left extension: not supermaximal.
                    return false;
                }
                // Skip the shared prefix of length lcp and compare the
                // characters immediately AFTER the repeat.
                int iForward = sequence[iLoc + lcp];
                int jForward = sequence[jLoc + lcp];
                if (iForward == jForward) {
                    // Shared right extension: not supermaximal.
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Association measure for trigrams according to Su, Wu &amp; Chang,
     * http://acl.ldc.upenn.edu/P/P94/P94-1033.pdf:
     * P(t1,t2,t3) / ( P(t1)P(t2)P(t3) + P(t1)P(t2,t3) + P(t1,t2)P(t3) ).
     *
     * <p>NOTE(review): the paper defines MI as log2 of this ratio; this
     * method returns the RAW ratio without the logarithm — confirm whether
     * callers expect the log to be applied.
     *
     * @param words the trigram; exactly the first three entries are used.
     * @param sa    suffix array over the corpus.
     * @param N     corpus length in tokens.
     * @return the (un-logged) association ratio.
     */
    public static double miSuWuChangTrigram(int[] words, SuffixArray sa, int N) {

        int freqWord1 = sa.search(new int[]{words[0]}, sa.top).tf();
        int freqWord2 = sa.search(new int[]{words[1]}, sa.top).tf();
        int freqWord3 = sa.search(new int[]{words[2]}, sa.top).tf();

        int bigram1and2Freq = sa.search(new int[]{words[0], words[1]}, sa.top).tf();
        int bigram2and3Freq = sa.search(new int[]{words[1], words[2]}, sa.top).tf();

        int trigramFreq = sa.search(new int[]{words[0], words[1], words[2]}, sa.top).tf();

        // Maximum-likelihood probabilities: a length-k n-gram has N-k+1
        // possible positions, hence the N, N-1, N-2 denominators.
        double numerator = (double) trigramFreq / (double) (N - 2);
        double independent = ((double) freqWord1 / N)
                * ((double) freqWord2 / N)
                * ((double) freqWord3 / N);
        double firstSplit = ((double) freqWord1 / N)
                * ((double) bigram2and3Freq / (N - 1));
        double secondSplit = ((double) bigram1and2Freq / (N - 1))
                * ((double) freqWord3 / N);
        double denominator = independent + firstSplit + secondSplit;
        return numerator / denominator;
    }

}
