/*
 * Saphre - Suffix Arrays for Phrase Extraction
 * Copyright (C) 2013 
 * Dale Gerdemann - Tübingen, Germany 
 * Niko Schenk - Frankfurt am Main, Germany
 * All rights reserved.
 *
 * This program is free software: you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package util.tokenization_io;

import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import util.sedgewick.Stopwatch;

/**
 * This class is used as a playground for testing the efficiency of the
 * preprocessing steps (all steps before getting to the actual suffix array
 * construction). By running this, you can see that the tokenization is the major
 * bottleneck. So if anyone needs a project to work on, that's where things
 * could be improved. It's complicated though since the test corpus comes from
 * Usenet forums, and the tokenizer is doing a lot of cleanup.
 * <p/>
 * A good webpage on Java performance tuning can be found at:
 * http://www.testing-software.org/Tools/gcview/index.html.
 * <p/>
 * This class started life as an adaptation of Sedgewick's TST symbol table. I'm
 * still uncertain whether this has any advantages over standard Java
 * Collections. I'm still experimenting. But, in any case, the main bottleneck
 * is in tokenizing.
 * <p/>
 * A Symbol Table based on http://www.cs.princeton.edu/algs4/52trie/TST.java,
 * but specialized for use with ks.Store. The main method shows some timing
 * tests. Typical results are:
 * <p/>
 * Time for dedicated st: 3.65 Time for generic st: 6.081
 * <p/>
 * The dedicated symbol table is not quite twice as fast.
 * <p/>
 * A little more experimentation shows that the dedicated symbol table is only
 * slightly faster for construction. Most of the benefit comes from the faster
 * access (
 * <code>get</code> method).
 * <p/>
 * Update, the comparison above was with TreeMap. Against HashMap, this class is
 * only a small (maybe 5%) improvement.
 * <p/>
 * Bug/limitation: How can you insert the empty string?
 */
public class SymTable {

    private int N;       // number of trie nodes (NOT the number of stored keys)
    Node root;           // root of the ternary search trie

    /** One TST node: a character, three subtries, and an optional key value. */
    private class Node {

        char c;                 // character stored at this node
        Node left, mid, right;  // left (< c), middle (== c), right (> c) subtries
        int val;                // value of the key ending at this node; -1 = no key ends here
    }

    /**
     * Returns the number of nodes in the trie. Note this counts nodes, not
     * distinct keys.
     *
     * @return the node count
     */
    public int size() {
        return N;
    }

    /**
     * Looks up {@code key} in the symbol table.
     *
     * @param key the key to look up
     * @return the value associated with {@code key}, or -1 if it is absent
     */
    public int get(String key) {
        // A TST cannot represent the empty string (see the class Javadoc);
        // report "absent" instead of throwing from charAt(0).
        if (key.isEmpty()) {
            return -1;
        }
        return get(root, key, 0);
    }

    /**
     * Standard TST lookup of {@code key.charAt(d..)} in the subtrie rooted at
     * {@code x}.
     */
    private int get(Node x, String key, int d) {
        if (x == null) {
            return -1;                      // fell off the trie: key absent
        }
        char c = key.charAt(d);
        if (c < x.c) {
            return get(x.left, key, d);     // same character position, smaller subtrie
        } else if (c > x.c) {
            return get(x.right, key, d);    // same character position, larger subtrie
        } else if (d < key.length() - 1) {
            return get(x.mid, key, d + 1);  // character matched: advance to the next one
        } else {
            return x.val;                   // last character matched; -1 if no key ends here
        }
    }

    /**
     * Inserts string {@code s} into the symbol table with a provisional value
     * of 0. Real values are assigned later by {@link #numberConseq(String[])}.
     *
     * @param s the key to insert; must be non-empty
     * @throws IllegalArgumentException if {@code s} is empty (a TST cannot
     *         store the empty string; previously this surfaced as a
     *         StringIndexOutOfBoundsException)
     */
    public void add(String s) {
        if (s.isEmpty()) {
            throw new IllegalArgumentException("cannot insert the empty string");
        }
        root = add(root, s, 0);
    }

    private Node add(Node x, String s, int d) {
        char c = s.charAt(d);
        if (x == null) {
            x = new Node();
            x.c = c;
            x.val = -1;   // interior node by default: no key ends here yet
            N++;
        }
        if (c < x.c) {
            x.left = add(x.left, s, d);
        } else if (c > x.c) {
            x.right = add(x.right, s, d);
        } else if (d < s.length() - 1) {
            x.mid = add(x.mid, s, d + 1);
        } else {
            x.val = 0;    // mark that a key ends at this node
        }
        return x;
    }

    /**
     * Renumbers all stored keys with consecutive integers {@code 0..k-1} in
     * lexicographic order (in-order trie walk) and writes key {@code i} into
     * {@code intToWord[i]}.
     *
     * @param intToWord output array; must have room for every distinct key
     */
    public void numberConseq(String[] intToWord) {
        StringBuilder sb = new StringBuilder();
        int[] n = new int[1];   // single-cell box so recursion can share the counter
        n[0] = 0;
        numberConseq(root, sb, n, false, intToWord);
    }

    /**
     * In-order walk that assigns consecutive numbers to keys. When a key ends
     * at a node, its full text is not yet known (the prefix buffer holds it,
     * but we avoid an extra toString per key); instead we rely on the word
     * returned by the middle-subtrie call, which shares the current prefix, and
     * slice the key out of it with substring.
     *
     * @param x        subtrie being walked
     * @param prefix   characters accumulated on the path from the root
     * @param n        one-element box holding the next number to assign
     * @param needWord true if a key ended at our caller, so the base case must
     *                 materialize {@code prefix} as a string
     * @param intToWord output array mapping assigned numbers back to keys
     * @return a completed key passing through this subtrie, or null
     */
    public String numberConseq(Node x, StringBuilder prefix, int[] n,
            boolean needWord, String[] intToWord) {
        if (x == null) {
            if (needWord) {
                // A key ended exactly at the caller: the prefix IS the key.
                String longWord = prefix.toString();
                return longWord;
            } else {
                return null;
            }
        }
        numberConseq(x.left, prefix, n, false, intToWord);
        prefix.append(x.c);
        int saveN = -1;
        if (x.val != -1) {
            // A key ends at this node: give it the next consecutive number.
            x.val = n[0];
            needWord = true;
            saveN = n[0];
            n[0]++;
        }
        // Whatever word the middle subtrie returns shares our current prefix,
        // so its first prefix.length() characters are exactly this node's key.
        String longWord = numberConseq(x.mid, prefix, n, needWord, intToWord);
        if (saveN != -1) {
            intToWord[saveN] = longWord.substring(0, prefix.length());
        }
        prefix.deleteCharAt(prefix.length() - 1);
        numberConseq(x.right, prefix, n, false, intToWord);
        return longWord;
    }

    /**
     * Test client: times this dedicated TST symbol table against a plain
     * HashMap on a tokenized corpus.
     *
     * @param args unused
     */
    public static void main(String[] args) {

        String path = "src/main/resources/corpora/testcorpus/";

        Normalizer norm = new LowerCaseNormalize();
        Tokenizer tok = new RegexTokenizer(path + "/loadOptions/regex", norm);
        File dir = new File(path);

        // File.list() returns null if the path is not a readable directory;
        // the original would have thrown an NPE in the for-each below.
        String[] entries = dir.list();
        if (entries == null) {
            System.err.println("Cannot list directory: " + path);
            return;
        }
        ArrayList<String> files = new ArrayList<String>();
        for (String filename : entries) {
            if (!filename.endsWith("~") && !filename.equals(".svn")) {
                files.add(filename);
            }
        }

        Stopwatch stop = new Stopwatch();
        List<String> texts = new ArrayList<String>();
        for (String filename : files) {
            if (!filename.equals("loadOptions") && !filename.equals("serialize")) {
                String untokenized = Slurp.slurp(path + "/" + filename);
                texts.add(untokenized);
            }
        }
        System.out.println("Time for reading " + stop.elapsedTime());

        stop = new Stopwatch();
        List<String> tokenized = new ArrayList<String>();
        for (String text : texts) {
            TokenIterator tokenize = tok.iterator(text);
            while (tokenize.hasNext()) {
                // NOTE: the original printed every token here, which dominated
                // the "tokenizing" timing; removed so the measurement is real.
                tokenized.add(tokenize.next());
            }
        }
        System.out.println("Time for tokenizing " + stop.elapsedTime());

        for (int z = 0; z < 1; z++) {
            stop = new Stopwatch();
            String[] intToWord = new String[tokenized.size()];
            SymTable st = new SymTable();
            // BUG FIX: the original never inserted the tokens, so the trie was
            // empty, numberConseq numbered nothing, and every st.get(s) below
            // returned -1 — the "dedicated st" timing was meaningless.
            for (String s : tokenized) {
                st.add(s);
            }
            st.numberConseq(intToWord);
            System.out.println("Time for dedicated st: " + stop.elapsedTime());

            stop = new Stopwatch();
            int[] text1 = new int[tokenized.size()];
            int num = 0;
            for (String s : tokenized) {
                text1[num] = st.get(s);
                num++;
            }
            System.out.println("Time for converting text: "
                    + stop.elapsedTime());
        }

        for (int z = 0; z < 1; z++) {
            stop = new Stopwatch();
            String[] intToWord = new String[tokenized.size()];
            Map<String, Integer> words = new HashMap<String, Integer>();
            for (String s : tokenized) {
                words.put(s, 0);
            }
            int n = 0;
            for (String s : words.keySet()) {
                intToWord[n] = s;
                words.put(s, n);
                n++;
            }
            System.out.println("Time for generic st: " + stop.elapsedTime());

            stop = new Stopwatch();
            int[] text1 = new int[tokenized.size()];
            int num = 0;
            for (String s : tokenized) {
                // BUG FIX: a redundant duplicate words.get(s) call was removed
                // here; it doubled the lookup work being timed.
                text1[num] = words.get(s);
                num++;
            }
            System.out.println("Time for conversion: " + stop.elapsedTime());
        }
    }
}
