/*
 * Saphre - Suffix Arrays for Phrase Extraction
 * Copyright (C) 2013 
 * Dale Gerdemann Tübingen, Germany 
 * Niko Schenk Frankfurt am Main, Germany
 * All rights reserved.
 *
 * This program is free software: you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package collectors.impl;

import java.io.PrintWriter;
import util.sorting.Multiset;
import collectors.api.Collector1;
import util.Interval;
import saphre.core.Store;
import saphre.core.SuffixArray;


/**
 * Experimental! Do not use.
 * 
 * Class which simulates lcp-interval tree traversal to collect keywords in context
 * for a given search query.
 * 
 * It implements the Collector interface.
 *
 * @author Niko Schenk
 */
public class ConcordancePrintCollector implements Collector1 {

    // Corpus store and its suffix array, shared by all callbacks.
    Store store;
    SuffixArray sa;
    // Number of "real" documents; excludes the two sentinel documents.
    double D;
    Interval top;
    PrintWriter pw = null;
    // Search query; falls back to "dummy" when no query argument is given.
    String query = "dummy";
    // Phrase-length / term-frequency filters applied in add().
    int minLengthOfPhrase = 1;
    int maxLengthOfPhrase = 50;
    int minTermFrequency = 1;

    /**
     * Creates a collector that prints keyword-in-context lines for intervals
     * whose common prefix equals the search query.
     *
     * @param store backing text store
     * @param sa    suffix array over the store's text
     * @param top   top lcp-interval of the traversal
     * @param pw    sink for the (currently placeholder) per-match output
     * @param args  command-line arguments; args[2] is the search query
     */
    public ConcordancePrintCollector(Store store, SuffixArray sa, Interval top, PrintWriter pw, String[] args) {
        this.store = store;
        this.sa = sa;
        // Subtract 2! Cf. initial and final sentinels.
        D = (double) store.numDocs() - 2;
        this.top = top;
        this.pw = pw;
        // Guard against a short args array instead of throwing
        // ArrayIndexOutOfBoundsException; keep the "dummy" default otherwise.
        if (args != null && args.length > 2) {
            this.query = args[2];
        }
    }

    public void addPrefixArray(SuffixArray pa) {
        // Intentionally empty: prefix arrays are not used by this collector.
    }

    public void preAdd(Interval inter, int parentLcp, int lcp, int depth) {
        // Intentionally empty: auxiliary nodes carry no information we need.
    }

    /**
     * Called for every lcp-interval of the traversal. If the interval's
     * common prefix (with token separators stripped) equals the query,
     * its keyword-in-context lines are printed via {@link #printKwic}.
     */
    @Override
    public void add(Interval inter, int lcp, int depth, Multiset docDist,
            Multiset leftContext) {

        int tf = inter.tf();

        // Limit on the length of phrase and on the term frequency.
        if (lcp >= minLengthOfPhrase && lcp <= maxLengthOfPhrase && tf >= minTermFrequency) {

            int loc = sa.getSuftab()[inter.lb()];
            String ngram = store.toString(loc, (loc + lcp));

            // ngram's tokens are separated by " " (defined in Store.toString());
            // strip the separators before comparing against the query.
            if (ngram.replace(" ", "").equals(query)) {
                printKwic(ngram, loc, lcp, inter);
            }
        }
    }

    public void addTrivial(Interval inter, int lcp, int depth,
            Multiset docDist, Multiset leftContext) {
        // Intentionally empty: the previous implementation computed doc
        // distribution, tf/df and an ngram string for lcp == 0 intervals but
        // never used any of them, so this callback had no observable effect.
    }

    /**
     * Prints every occurrence of {@code ngram} with one token of left context
     * and {@code lcp + 4} tokens to the right (keyword in context).
     *
     * @param ngram the matched phrase as produced by {@code store.toString}
     * @param loc   text position of the first occurrence
     * @param lcp   length (in tokens) of the matched phrase
     * @param inter lcp-interval covering all occurrences of the phrase
     */
    public void printKwic(String ngram, int loc, int lcp, Interval inter) {
        System.out.println("ngram: " + ngram);
        System.out.println("query: " + query);

        System.out.println("Interval: " + inter.toString()
                + " docdist: " + inter.docDist(sa, store));

        // Clamp context windows to the text boundaries — the original code had
        // an acknowledged index-out-of-bounds risk at both ends.
        int textLength = sa.getText().length;
        for (int tfCnt = 0; tfCnt < inter.tf(); tfCnt++) {
            // Start index of this occurrence of the repeat.
            int startIndex = sa.getSuftab()[inter.lb() + tfCnt];
            System.out.println("start index: " + startIndex);
            // One token to the left and lcp + 4 tokens to the right.
            int from = Math.max(0, startIndex - 1);
            int to = Math.min(textLength, startIndex + lcp + 4);
            String kwic = store.toString(from, to);
            System.out.println("kwic: " + kwic);
        }

        pw.print("dummy" + "\t");
        pw.print("\n");
    }
}