/*
 * Saphre - Suffix Arrays for Phrase Extraction
 * Copyright (C) 2013 
 * Dale Gerdemann - Tübingen, Germany 
 * Niko Schenk - Frankfurt am Main, Germany
 * All rights reserved.
 *
 * This program is free software: you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package saphre.core;

import java.io.File;
import java.io.Serializable;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import util.sedgewick.Stopwatch;
import util.tokenization_io.Slurp;
import util.tokenization_io.TokenIterator;
import util.tokenization_io.Tokenizer;
import util.IntString;
import util.IntervalSearch;
import util.tokenization_io.BoundarySymbols;
import util.Interval;

/**
 * A Store object is a container for corpus data. A Store generates & contains
 * (among other things) - the words of the corpus - their integer
 * representations (mappings) - the documnet names of the corpus - document
 * numbers - information about: sentinels, boundary symbols (used for gappy
 * phrases), etc.
 *
 * A Store object is then used to generate the suffix array.
 *
 * @authors Dale Gerdemann, Niko Schenk
 */
public class Store implements Serializable {

    // A mapping from word to an integer representation.
    private Map<String, Integer> wordToInt;
    // Directory the corpus was read from (SRC.DIR only); null otherwise.
    public String corpusDir;
    // Inverse of wordToInt: the word for each token id (no sentinel entries).
    public String[] intToWord;
    // The whole corpus as token ids; text[0] holds an extra-large sentinel
    // and each document is terminated by its own unique sentinel.
    public int[] text;
    // Document names, including the initial/final sentinel pseudo-documents.
    public String[] docNames;
    // Inverse of docNames: document name -> document number.
    private Map<String, Integer> docNameToNum;
    // Number of (non-sentinel) tokens per document.
    public int[] tokenCounts;
    // String used when printing sentinel tokens.
    private String sentinel;
    // Token-id range occupied by sentinels: [smallestSentinel, largestSentinel].
    public int smallestSentinel;
    public int largestSentinel;
    // Tokenizer used to split the sources into tokens.
    public Tokenizer tok;
    // Maps a text position to the document interval containing it.
    public IntervalSearch is;
    // Separator placed between tokens in pretty printing.
    public String sep;
    // Token ids registered as boundaries (see addBoundaries/isBoundary).
    private Set<Integer> boundaries;
    // Boundary-symbol helper (see setTC/getTC); used by sigmaIdFiltered.
    private BoundarySymbols tc;
    // The source names (files or literal strings) making up the corpus.
    private String[] sources;
    // Per-source deletion intervals: token ranges excluded while indexing.
    private Map<String, List<Interval>> delMap;
    // Per-source character offsets at which kept/deleted regions alternate.
    private Map<String, List<Integer>> allCuts;
    // If select < number of files, index only a random sample of this size.
    public static int select = Integer.MAX_VALUE;
    // A previously drawn sample of sources, reused across Store instances.
    public static String[] selection = null;

    // Constructor flags: the kind of source the Store is built from.
    public enum SRC {

        DIR, FILE, STRING, STRINGS
    }

    /**
     * Constructor - Generate a Store object from a corpus directory.
     *
     * The files in corpusDir are read in, tokenized and concatenated with a
     * unique sentinel at the end of each one. Each token is replaced by an
     * integer, so that inefficient string comparisons can be replaced with
     * very efficient integer comparisons. For example "to be or not to be"
     * would be replaced by {3,0,2,1,3,0,4}, where 0=be, 1=not, 2=or, 3=to and
     * 4 is a sentinel. This presumes that "tokens" are anything separated by
     * whitespace. The tokenizer is modular, however, and can be changed to
     * anything the user desires.
     *
     * @param corpusDir The directory containing the corpus (at least one file)
     * @param tok A tokenizer
     * @param delMap per-file deletion intervals (token ranges to drop); may be null
     * @param sentinel The String used for printing sentinels
     * @param sep a separator between tokens to be used in pretty printing
     */
    public Store(String corpusDir, Tokenizer tok,
            Map<String, List<Interval>> delMap,
            String sentinel, String sep) {
        this.delMap = delMap;
        // init(SRC.DIR, ...) assigns this.corpusDir itself, normalized to end
        // with "/". The original re-assigned the raw (possibly slash-less)
        // argument afterwards, leaving the field inconsistent with the
        // normalized form used during indexing; keep the normalized value.
        init(SRC.DIR, corpusDir, null, tok, sentinel, sep);
    }

    /*
     * Initialize the Store: collect the source texts, tokenize them in a
     * first pass to build the word list, assign ints to words in
     * lexicographic order, then make a second pass producing the int-encoded
     * corpus 'text' with one unique sentinel after each document and an
     * extra-large sentinel at position 0 (1-based indexing is used).
     *
     * srcFlag      kind of source (DIR/FILE/STRING/STRINGS)
     * source       directory or file name (DIR/FILE) or corpus string (STRING)
     * stringCorpus the corpus texts themselves when srcFlag == STRINGS
     * tok          tokenizer used for both passes
     * sentinel     string used when printing sentinel tokens
     * sep          separator between tokens in pretty printing
     */
    private void init(SRC srcFlag, String source, String[] stringCorpus,
            Tokenizer tok, String sentinel, String sep) {

        this.tok = tok;
        this.sentinel = sentinel;
        this.sep = sep;
        this.boundaries = new HashSet<Integer>();
        sources = null;
        corpusDir = null;
        if (srcFlag == SRC.FILE || srcFlag == SRC.STRING) {
            // Single source: one file name or one literal corpus string.
            sources = new String[1];
            sources[0] = source;
        } else if (srcFlag == SRC.STRINGS) {
            sources = stringCorpus;
        } else if (srcFlag == SRC.DIR) {
            // Normalize the directory name to end with "/".
            corpusDir = source;
            if (!corpusDir.endsWith("/")) {
                corpusDir += "/";
            }
            //System.out.println(corpusDir);


            File corpusdir = new File(corpusDir);
            ArrayList<String> sources0 = new ArrayList<String>();
            System.out.print("Collecting text files...");
            Slurp.collectTextFiles(sources0, corpusdir.getAbsolutePath());
            System.out.println(" done.");
            System.out.print("Indexing text files...");


            String[] typeSpec = new String[0];
            sources = sources0.toArray(typeSpec);
            if (selection != null) {
                // Reuse a previously drawn random subset of the corpus.
                sources = selection;
            } else if (select < sources.length) {
                // Draw 'select' files at random (partial Fisher-Yates style
                // sampling without replacement) and remember the subset in
                // the static 'selection' field for later Store instances.
                Random rand = new Random();
                String[] sources1 = new String[select];
                for (int s = 0; s < select; s++) {
                    int idx = rand.nextInt(sources.length - s);
                    sources1[s] = sources[idx];
                    sources[idx] = sources[sources.length - s - 1];
                }
                sources = sources1;
                selection = sources1;
            }
            Arrays.sort(sources);
            System.out.println(" done.");
            if (srcFlag != SRC.STRINGS) {
                String corpusFiles = Arrays.toString(sources);
                // Only print corpus files if they "fit" to the screen.
                // TODO: Improve stuff here.
                if (corpusFiles.length() < 50) {
                    System.out.println(corpusFiles);
                }
                System.out.println("Indexed " + sources.length + " files.");
            }
        }
        System.out.println("\nTokenizing corpus (converting tokens to integers)...");

        // Number of documents, counting the initial and final sentinels as
        // pseudo-documents of their own.
        int D = sources.length + 2;

        // Record the name of each file and the number of new tokens in
        // the file.
        docNames = new String[D];    // This could make some very long doc names
        docNames[0] = "InitialSentinel";
        docNames[D - 1] = "FinalSentinel";
        for (int i = 0; i < sources.length; i++) {
            docNames[i + 1] = sources[i];
        }


        docNameToNum = new TreeMap<String, Integer>();
        tokenCounts = new int[D];

        for (int i = 0; i < docNames.length; i++) {
            docNameToNum.put(docNames[i], i);
        }

        //System.out.println( docNameToNum);

        // Initial count of number of tokens in corpus
        int numTokens = 0;

        // SymTable wordToInt = new SymTable();
        wordToInt = new TreeMap<String, Integer>();

        allCuts = new TreeMap<String, List<Integer>>();

        // First pass through the corpus: collect the word list (and the kept
        // token sequences) so ints can later be assigned to words in
        // lexicographic order.
        List<List<String>> tokseqs = new ArrayList<List<String>>();
        Stopwatch stop = new Stopwatch();
        for (String sourcename : sources) {
            // System.out.println( sourcename);
            List<String> tokseq = new ArrayList<String>();
            tokseqs.add(tokseq);
            String s = sourcename;

            if (srcFlag == SRC.DIR) {
                //s = Slurp.slurp(corpusDir + sourcename);
                s = Slurp.slurpIt(sourcename);
            } else if (srcFlag == SRC.FILE) {
                //s = Slurp.slurp(sourcename);
                s = Slurp.slurpIt(sourcename);
            }
            // Iterator<String> tokenize = tok.iterator(s, false);
            //RegexTokenizer.fileName(sourcename);
            TokenIterator tokenize = tok.iterator(s);
            // System.out.println( tokenize.hasNext());

            // Ensure every source has a (possibly empty) deletion list.
            if (delMap == null) {
                delMap = new TreeMap<String, List<Interval>>();
            }
            if (delMap.get(sourcename) == null) {
                List<Interval> x = new ArrayList<Interval>();
                delMap.put(sourcename, x);
            }

            // Flatten this source's deletion intervals into a sequence of
            // token positions at which "keep" toggles; the trailing
            // MAX_VALUE entry is a guard that should never be reached.
            int[] changes = new int[2 * delMap.get(sourcename).size() + 1];
            int idx = 0;
            for (Interval itr : delMap.get(sourcename)) {

                changes[idx++] = itr.lb();
                changes[idx++] = itr.rb();
            }
            changes[changes.length - 1] = Integer.MAX_VALUE;
            idx = 0;
            int localTokens = 0;
            boolean keep = true;
            List<Integer> cuts = new ArrayList<Integer>();
            cuts.add(0);

            while (tokenize.hasNext()) {
                String token = tokenize.next();
                if (localTokens++ == changes[idx]) {
                    // System.out.println( localTokens + " " + changes[idx]);
                    // Record the character offset of this toggle point;
                    // the tokenizer reports -1 for "end of input".
                    int x = tokenize.start();
                    if (x == -1) {
                        cuts.add(s.length());
                    } else {
                        cuts.add(x);
                    }
                    keep = !keep;
                    // System.out.println( tokenize.start());
                    //System.out.println("\n");
                    idx++;
                    // this is in case the intervals overlap
                    // NOTE(review): once idx has advanced to the last slot,
                    // this write clobbers the MAX_VALUE guard installed
                    // above - confirm intervals are disjoint and in range.
                    changes[idx] = Math.max(changes[idx], changes[idx - 1] + 1);
                }

                // System.out.println( token);
                if (keep) {
                    //System.out.println(token);
                    numTokens++;
                    // Value 0 is a placeholder; real ids are assigned after
                    // this pass, once the full word set is known.
                    wordToInt.put(token, 0);
                    tokseq.add(token);
                }
            }
            //System.out.println(sourcename + ": " + cuts + " end:" + s.length());
            // NOTE(review): for non-DIR sources corpusDir is null here, so
            // the key becomes "null" + sourcename; for DIR sources the
            // sourcename is already an absolute path - verify the intended
            // key format for allCuts.
            allCuts.put(corpusDir + sourcename, cuts);
        }

        System.out.println("Tokenization: " + numTokens + " tokens in "
                + stop.elapsedTime() + " seconds");

        // Make a symbol table for the words. 
        // Don't include sentinels in this table
        intToWord = new String[wordToInt.size()];
        int num = 0;
        for (String w : wordToInt.keySet()) {
            wordToInt.put(w, num);
            intToWord[num] = w;
            num++;
        }
        // Sentinel ids start directly above the last word id.
        int sentinelNum = num;
        smallestSentinel = num;

        /*
         while (num < intToWord.length) {
         intToWord[num] = sentinel;
         sentinels.add(num);
         num++;
         }
         */

        // Now make a second pass through the text to replace each word with
        // an int. This is repetitive, but collecting all the words first
        // means the ints are ordered the same way the words are ordered
        // lexicographically, and every wordToInt lookup below is guaranteed
        // to succeed without checking for existence.
        //
        // The text is extended to allow for sentinels;
        text = new int[numTokens + D];

        // The array 'text' is a concatenation of the tokenized texts
        // of the corpus (separated by sentinels). Given an index into
        // 'text', it is important to be able to identify the original
        // corpus text for the indexed token. This task is performed
        // by the IntervalSearch class.
        is = new IntervalSearch(D);
        is.put(0, 0);

        int doc = 0;
        numTokens = 1;            // Start at 1 for 1-based indexing

        for (List<String> tokseq : tokseqs) {
            int prev = numTokens;
            Iterator<String> tokenIterate = tokseq.iterator();
            int numTokensInThisFile = 0;
            while (tokenIterate.hasNext()) {
                String word = tokenIterate.next();
                numTokensInThisFile++;
                text[numTokens++] = wordToInt.get(word);
            }

            // The last token for each file is a sentinel
            is.put(prev, numTokens);   // IntervalSearch
            text[numTokens++] = sentinelNum++;
            tokenCounts[doc] = numTokensInThisFile;
            doc++;
        }
        text[numTokens++] = sentinelNum++;
        largestSentinel = sentinelNum - 1;

        // Put an even larger sentinel at the beginning. Since we use
        // 1-based indexing, this sentinel is normally not even
        // seen. But it's useful for cases where one wants to look
        // backwards without dealing with special cases. For example,
        // the bwttab (Abouelhoda et al) looks backward. Also, one may
        // want to look at the text from the other direction, to
        // construct a "prefix array".
        text[0] = largestSentinel + 1;
        // System.out.println( Arrays.toString(text));
    }

    /**
     * Separate constructor where the corpus is specified by a set of strings
     * instead of a directory containing a set of files. Presently used only by
     * AhoCorasickAutomaton, but generally useful for small or automatically
     * generated examples.
     *
     * @param corpus the corpus texts, one string per document
     * @param tok a tokenizer
     * @param sentinel the String used for printing sentinels
     * @param sep a separator between tokens, used in pretty printing
     */
    public Store(String[] corpus, Tokenizer tok, String sentinel, String sep) {
        init(SRC.STRINGS, null, corpus, tok, sentinel, sep);
    }

    /**
     * Separate constructor where the corpus is specified by a list of
     * IntStrings (already int-encoded phrases). The word-int mapping,
     * tokenizer, separator and sentinel string are shared with an existing
     * Store.
     *
     * @param phrases the documents, one int-encoded phrase each
     * @param phraseNames a display name per phrase
     * @param store0 the Store whose symbol table is reused
     */
    public Store(List<IntString> phrases, List<String> phraseNames,
            Store store0) {

        sentinel = store0.sentinel;
        smallestSentinel = store0.smallestSentinel;
        tok = store0.tok;
        sep = store0.sep;
        boundaries = new TreeSet<Integer>();
        tc = new BoundarySymbols();

        wordToInt = store0.wordToInt;
        intToWord = store0.intToWord;

        // Number of documents: one per phrase plus the two sentinel
        // pseudo-documents, mirroring init(). (The original used size() + 1,
        // which let the last phrase name overwrite the final-sentinel slot
        // and, worse, made the 'text' array one element too short, so the
        // final sentinel write below threw ArrayIndexOutOfBoundsException
        // for any non-empty phrase list.)
        int D = phraseNames.size() + 2;
        docNames = new String[D];
        docNames[0] = "InitialSentinel";
        docNames[D - 1] = "FinalSentinel"; // spelled as in init()
        for (int i = 0; i < phraseNames.size(); i++) {
            docNames[i + 1] = "" + (i + 1) + ":" + phraseNames.get(i);
        }

        docNameToNum = new TreeMap<String, Integer>();
        tokenCounts = new int[D];
        for (int i = 0; i < docNames.length; i++) {
            docNameToNum.put(docNames[i], i);
        }

        // Total number of real (non-sentinel) tokens.
        int numTokens = 0;
        for (IntString s : phrases) {
            numTokens += s.length();
        }
        // Room for all tokens plus one sentinel slot per document.
        text = new int[numTokens + D];

        // Sentinel ids continue from the id range of the donor Store.
        int sentinelNum = store0.getLargestSentinel();
        is = new IntervalSearch(D);
        is.put(0, 0);
        int doc = 0;
        int idx = 1;            // Start at 1 for 1-based indexing

        for (IntString phrase : phrases) {
            int prev = idx;
            for (int i = 0; i < phrase.length(); i++) {
                text[idx++] = phrase.charAt(i);
            }

            // The last token for each phrase is a unique sentinel
            is.put(prev, idx);   // IntervalSearch
            text[idx++] = sentinelNum++;

            tokenCounts[doc] = phrase.length();
            doc++;
        }
        text[idx++] = sentinelNum++;
        largestSentinel = sentinelNum - 1;
        // Extra-large sentinel at position 0: normally unseen with 1-based
        // indexing, but useful when scanning backwards.
        text[0] = largestSentinel + 1;
    }

    /**
     * Get the alphabet associated with this Store object.
     *
     * @return the set of word types in the corpus; sentinels are excluded
     * since they have no unique string representation
     */
    public Set<String> sigma() {
        return this.wordToInt.keySet();
    }

    /**
     * Get the alphabet associated with this Store object (alias of sigma()).
     *
     * @return the set of word types in the corpus, without sentinels
     */
    public Set<String> alphabet() {
        return this.wordToInt.keySet();
    }

    /**
     * Get the alphabet size associated with this Store object.
     * Sentinels are not counted (they are not in the word table).
     *
     * @return the number of distinct word types in the corpus
     */
    public int alphabetSize() {
        // Map.size() is equivalent to keySet().size() without materializing
        // the key-set view.
        return wordToInt.size();
    }

    /**
     * Get the alphabet together with its integer ids.
     *
     * @return the word-to-id entries; sentinels are not included since they
     * have no unique string representation
     */
    public Set<Map.Entry<String, Integer>> sigmaId() {
        return this.wordToInt.entrySet();
    }

    /**
     * Get the alphabet entries whose ids are not user sentinels (as judged by
     * the BoundarySymbols helper 'tc').
     *
     * @return the filtered word-to-id entries, in the key order of the
     * underlying word table
     */
    public Set<Map.Entry<String, Integer>> sigmaIdFiltered() {
        // The original collected entries into a TreeSet with natural
        // ordering, but Map.Entry does not implement Comparable, so the
        // first add() threw ClassCastException. A LinkedHashSet preserves
        // the (key-sorted) iteration order of the TreeMap instead.
        Set<Map.Entry<String, Integer>> result =
                new LinkedHashSet<Map.Entry<String, Integer>>();
        for (Map.Entry<String, Integer> e : wordToInt.entrySet()) {
            // ("false ||" in the original was a no-op and has been dropped.)
            if (!tc.userSentinel(e.getValue().intValue())) {
                result.add(e);
            }
        }
        return result;
    }

    /**
     * Get document names associated with this Store object.
     *
     * @return an ordered array holding the names of the documents, including
     * the initial and final sentinel pseudo-documents
     */
    public String[] getDocNames() {
        return this.docNames;
    }

    /**
     * Get the tokenizer associated with this Store object.
     *
     * @return the tokenizer used to split the corpus into tokens
     */
    public Tokenizer getTokenizer() {
        return this.tok;
    }

    /**
     * Get the smallest sentinel associated with this Store object.
     *
     * @return tokID of the smallest sentinel; sentinels are the tokIDs used
     * to separate the texts in the corpus
     */
    public int getSmallestSentinel() {
        return this.smallestSentinel;
    }

    /**
     * Set the sentinel string associated with this Store object.
     * Only affects how sentinels are printed, not the encoded text.
     *
     * @param sent the new sentinel display string
     */
    public void setSentinelString(String sent) {
        sentinel = sent;
    }

    /**
     * Get the largest sentinel associated with this Store object.
     *
     * @return tokID of the largest sentinel (the one at the end of the text)
     */
    public int getLargestSentinel() {
        return this.largestSentinel;
    }

    /**
     * Check whether an int token is a sentinel of this Store.
     *
     * @param s a token id
     * @return true if s corresponds to a sentinel (sentinels occupy the id
     * range at and above smallestSentinel)
     */
    public boolean isSentinel(int s) {
        return s >= this.smallestSentinel;
    }

    /**
     * Get the alphabet size associated with this Store object.
     *
     * @return size of the alphabet, including sentinels
     */
    public int alphaSize() {
        return this.largestSentinel + 1;
    }

    /**
     * Convert a String token to an int token.
     *
     * @param w a token (String)
     * @return the int associated with the input token; an unknown token
     * causes a NullPointerException from unboxing - use wordToIntChecked
     * for a safe lookup
     */
    public int wordToInt(String w) {
        return this.wordToInt.get(w);
    }

    /**
     * Convert a String token to an int token (alias of wordToInt(String)).
     *
     * @param w a token (String)
     * @return the int associated with the input token
     */
    public int toInt(String w) {
        return this.wordToInt.get(w);
    }

    /**
     * Like wordToInt, but safe for unknown tokens.
     *
     * @param w a token (String)
     * @return the int associated with the token, or -1 if the token does not
     * occur in the word table
     */
    public int wordToIntChecked(String w) {
        // Single lookup instead of containsKey-then-get.
        Integer id = wordToInt.get(w);
        return (id == null) ? -1 : id.intValue();
    }

    /**
     * Get the corpus text associated with this Store object.
     *
     * @return the encoded text: an array of token ids, sentinels included
     */
    public int[] text() {
        return this.text;
    }

    /**
     * Get the number of the document containing a given corpus position.
     * Each file of the corpus is assigned a document number when the Store
     * is built.
     *
     * @param pos a position in the text
     * @return the number of the document containing text()[pos]
     */
    public int docNum(int pos) {
        return this.is.bsearch(pos);
    }

    /**
     * Get the start index of the document containing a corpus position.
     *
     * NOTE(review): despite the name, this returns the containing document's
     * lower bound in the corpus, not pos relative to the document start -
     * confirm the intended semantics with callers.
     *
     * @param pos a position in the text
     * @return the start index of the containing document
     */
    public int getOffsetInDoc(int pos) {
        return this.is.lo[this.docNum(pos)];
    }

    /**
     * Get the name of the document containing a given corpus position.
     *
     * @param pos a position in the text
     * @return the name of the document containing text()[pos]
     */
    public String docNameOfPos(int pos) {
        return this.docName(this.docNum(pos));
    }

    /**
     * Get the name of the document given a document number.
     *
     * @param docNum the number of a document
     * @return the filename of the document; for an out-of-range number the
     * exception message is returned instead (legacy behavior, preserved for
     * callers that probe beyond the document range)
     */
    public String docName(int docNum) {
        try {
            return docNames[docNum];
        } catch (IndexOutOfBoundsException e) {
            // Narrowed from catch(Exception): only the expected
            // out-of-range case is handled; other failures propagate.
            return e.getMessage();
        }
    }

    /**
     * Get the number of documents associated with this Store object.
     *
     * @return the number of documents, including the sentinel
     * pseudo-documents in docNames
     */
    public int numDocs() {
        return this.docNames.length;
    }

    /**
     * Get the cut positions recorded while tokenizing each source.
     *
     * @return map from source name to the character offsets at which kept
     * and deleted regions alternate
     */
    public Map<String, List<Integer>> cuts() {
        return this.allCuts;
    }

    /**
     * Get the number of tokens associated with this Store object.
     *
     * @return the number of tokens in the corpus, not counting sentinels
     */
    public int numTokens() {
        int total = 0;
        for (int count : this.tokenCounts) {
            total += count;
        }
        return total;
    }

    /**
     * Count the tokens in documents whose name matches a pattern. This is
     * useful for classification tasks where the class of a text is encoded
     * in the filename of the text.
     *
     * @param pat the Pattern that the filename needs to match
     * @return the number of tokens in the matching files
     */
    public int numTokens(Pattern pat) {
        int total = 0;
        for (int doc = 0; doc < tokenCounts.length; doc++) {
            if (pat.matcher(docName(doc)).matches()) {
                total += tokenCounts[doc];
            }
        }
        return total;
    }

    /**
     * Method to categorize the documents according to information encoded in
     * the filename. pattern[k] is a regex that matches filenames of files in
     * the kth category.
     *
     * @param patterns array of compiled regular expressions
     * @return an array of length equal to the number of documents, where
     * position i stores the category number of document i, or -1 if no
     * pattern matches
     */
    public int[] docCategories(Pattern[] patterns) {
        int[] result = new int[docNames.length];
        for (int i = 0; i < result.length; i++) {
            String docName = docName(i);
            // Default to "no category"; the original wrote -1 inside the
            // inner loop on every miss, which left 0 (a valid category!)
            // when the patterns array was empty.
            result[i] = -1;
            for (int j = 0; j < patterns.length; j++) {
                Matcher m = patterns[j].matcher(docName);
                if (m.matches()) {
                    result[i] = j;
                    break;
                }
            }
        }
        return result;
    }

    /**
     * Convert a token id back to its string form.
     *
     * @param i the number for a particular word (a tokID)
     * @return the word (token); all sentinels are printed using the sentinel
     * string given at construction, and the id one past the largest sentinel
     * (the marker stored at text[0]) is rendered as a single space
     */
    public String toString(int i) {
        if (i == largestSentinel + 1) {
            return " ";
        }
        return isSentinel(i) ? sentinel : intToWord[i];
    }

    /**
     * Tokenize a raw text and encode it with the existing token-int map.
     *
     * @param s an untokenized text
     * @return the sequence of token ids; unknown tokens are encoded as -1
     * (via wordToIntChecked)
     */
    public int[] stringToInts(String s) {
        List<String> tokens = new ArrayList<String>();
        TokenIterator it = tok.iterator(s);
        while (it.hasNext()) {
            tokens.add(it.next());
        }
        int[] ids = new int[tokens.size()];
        for (int i = 0; i < ids.length; i++) {
            ids[i] = wordToIntChecked(tokens.get(i));
        }
        return ids;
    }

    /**
     * Convert a corpus span into a string.
     *
     * @param begin beginning position in the corpus
     * @param end one-past-the-end position in the corpus
     * @return the tokens in [begin, end), separated by sep
     */
    public String toString(int begin, int end) {
        StringBuilder out = new StringBuilder();
        // All but the last token get a trailing separator; the last token is
        // appended separately to avoid a fencepost separator.
        for (int i = begin; i < end - 1; i++) {
            out.append(toString(text[i]));
            out.append(sep);
        }
        if (begin < end && end - 1 < text.length) {
            out.append(toString(text[end - 1]));
        }
        return out.toString();
    }

    /**
     * Convert a length-delimited corpus span to a string.
     *
     * @param begin start position in the corpus
     * @param length number of tokens
     * @return the span [begin, begin + length) as a string
     */
    public String toStringL(int begin, int length) {
        return this.toString(begin, begin + length);
    }

    /**
     * Convert a corpus span, addressed by its last position, to a string.
     *
     * @param end last position of the span (inclusive)
     * @param length number of tokens
     * @return the span of the given length ending at end (inclusive)
     */
    public String toStringLRev(int end, int length) {
        return this.toString(end + 1 - length, end + 1);
    }

    /**
     * Extract a length-delimited corpus span as an array of token ids.
     *
     * @param begin start position in the corpus
     * @param length number of tokens
     * @return the token ids in [begin, begin + length)
     */
    public int[] toArrayL(int begin, int length) {
        return this.toArray(begin, begin + length);
    }

    /**
     * Extract a corpus span, addressed by its last position, as token ids.
     *
     * @param end last position of the span (inclusive)
     * @param length number of tokens
     * @return the token ids of the span ending at end (inclusive)
     */
    public int[] toArrayLRev(int end, int length) {
        return this.toArray(end + 1 - length, end + 1);
    }

    /**
     * Extract a corpus span as an array of token ids.
     *
     * @param begin the begin index
     * @param end the one-past-the-end index
     * @return the sequence of token ids from the text in [begin, end)
     */
    public int[] toArray(int begin, int end) {
        int[] result = new int[end - begin];
        // System.arraycopy replaces the original element-by-element loop;
        // out-of-range arguments still raise IndexOutOfBoundsException.
        System.arraycopy(text, begin, result, 0, end - begin);
        return result;
    }

    /**
     * Extract a corpus span as an IntString.
     *
     * @param begin the begin index
     * @param end the one-past-the-end index
     * @return the token ids in [begin, end) wrapped in an IntString
     */
    public IntString toIntString(int begin, int end) {
        return new IntString(this.toArray(begin, end));
    }

    /**
     * Extract a corpus span as a List of token ids.
     *
     * @param begin the begin index
     * @param end the one-past-the-end index
     * @return the token ids in [begin, end) as a list
     */
    public List<Integer> toList(int begin, int end) {
        List<Integer> out = new ArrayList<Integer>(end - begin);
        int pos = begin;
        while (pos < end) {
            out.add(text[pos]);
            pos++;
        }
        return out;
    }

    /**
     * @param text a text represented as a sequence of integers (tokIDs)
     * @return the text, converted into a string, with tokens separated by
     * the instance field <code>sep</code>; a leading -1 (the value
     * wordToIntChecked produces for unknown words) is skipped
     */
    public String toString(int[] text) {
        if (text.length == 0) {
            return "";
        }
        int first = (text[0] == -1) ? 1 : 0;
        StringBuilder out = new StringBuilder();
        // All but the last token get a trailing separator (avoids the
        // fencepost problem of one separator too many).
        for (int i = first; i < text.length - 1; i++) {
            out.append(toString(text[i])).append(sep);
        }
        out.append(toString(text[text.length - 1]));
        return out.toString();
    }

    /**
     * Convert an IntString of token ids into its text form.
     *
     * @param text the encoded phrase
     * @return the decoded string, with tokens separated by sep
     */
    public String toString(IntString text) {
        return this.toString(text.toArray());
    }

    /**
     * Variant of toString(int[]) for a List of token ids.
     *
     * @param text the encoded token sequence
     * @return the decoded string, with tokens separated by sep; a leading -1
     * is skipped
     */
    public String toString(List<Integer> text) {
        if (text.isEmpty()) {
            return "";
        }
        int first = (text.get(0) == -1) ? 1 : 0;
        StringBuilder out = new StringBuilder();
        // All but the last token get a trailing separator (avoids the
        // fencepost problem of one separator too many).
        for (int i = first; i < text.size() - 1; i++) {
            out.append(toString(text.get(i))).append(sep);
        }
        out.append(toString(text.get(text.size() - 1)));
        return out.toString();
    }

    /**
     * @return the whole corpus, converted back into a string: the token
     * sequence separated by the instance field <code>sep</code>
     */
    @Override
    public String toString() {
        return this.toString(this.text);
    }

    /**
     * A "boundary" is a token that is not allowed to occur in the bridge part
     * between the left part and right part of a gappy phrase.
     *
     * @param regexFile filename of a file containing a regex (compiled with
     * Pattern.COMMENTS) matching tokens that should become boundaries
     */
    public void addBoundaries(String regexFile) {
        Pattern pat = Pattern.compile(Slurp.slurp(regexFile), Pattern.COMMENTS);
        // Register the id of every word type the regex matches.
        for (int id = 0; id < intToWord.length; id++) {
            if (pat.matcher(intToWord[id]).matches()) {
                boundaries.add(id);
            }
        }
    }

    /**
     * A "boundary" is a token that is not allowed to occur in the bridge part
     * between the left part and right part of a gappy phrase: the tokens
     * matched by the regex given to <code>addBoundaries</code>, plus all
     * sentinels.
     *
     * @param tokID a token id
     * @return true if <code>tokID</code> is a "boundary"
     */
    public boolean isBoundary(int tokID) {
        return this.isSentinel(tokID) || this.boundaries.contains(tokID);
    }

    /**
     * Set the boundary symbols of this Store object.
     *
     * @param tc the BoundarySymbols helper to use
     */
    public void setTC(BoundarySymbols tc) {
        this.tc = tc;
    }

    /**
     * Get the boundary symbols associated with this Store object.
     *
     * @return the BoundarySymbols helper (may be null if never set)
     */
    public BoundarySymbols getTC() {
        return this.tc;
    }

    /**
     * Get the corpus sources (file names or literal strings) of this Store.
     *
     * @return the source names making up the corpus
     */
    public String[] getSources() {
        return this.sources;
    }

    /**
     * Get the sentinel display string associated with this Store object.
     *
     * @return the string used when printing sentinel tokens
     */
    public String getSentinelString() {
        return this.sentinel;
    }
}
