package process;

import data.Document;
import data.DocumentCollection;
import data.Term;
import data.TermCollection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import transform.Parser;
import transform.Stopper;

/**
 * Pembaca statistik dari dokumen, dijalankan dalam sebuah thread
 * Sebuah DocumentStatisticReader berasosiasi dengan sebuah DocumentCollection 
 * yang akan diproses. Proses statistic reading akan selesai jikan crawling 
 * telah selesai dilakukan dan dokumen telah habis diproses.
 * @author Muqtafi Akhmad
 */
/**
 * Reads term statistics from documents; intended to run inside its own thread.
 * <p>
 * A {@code DocumentStatisticsReader} is associated with a single
 * {@link DocumentCollection} to process. The statistics-reading loop finishes
 * once crawling is complete ({@code isFinishedCollecting()}) and the document
 * queue has been drained.
 *
 * @author Muqtafi Akhmad
 */
public abstract class DocumentStatisticsReader implements Runnable {

    /** Worker ID, used only in progress log messages. */
    private String ID;
    /** Source collection of documents to be processed. */
    private DocumentCollection documentCollection;
    /** Target collection into which extracted terms are accumulated. */
    private TermCollection termCollection;
    /** Parser that tokenizes a document into a map of terms. */
    private Parser parser;
    /** Stopper that removes stop words from the parsed terms. */
    private Stopper stopper;

    /**
     * Sets the worker ID used in log output.
     *
     * @param ID worker identifier
     */
    public void setID(String ID) {
        this.ID = ID;
    }

    /**
     * @return the worker ID
     */
    public String getID() {
        return ID;
    }

    /**
     * Sets the term collection that accumulated terms are written to.
     *
     * @param termCollection target term collection
     */
    public void setTermCollection(TermCollection termCollection) {
        this.termCollection = termCollection;
    }

    /**
     * @return the term collection terms are written to
     */
    public TermCollection getTermCollection() {
        return termCollection;
    }

    /**
     * Sets the document collection this reader consumes from.
     *
     * @param documentCollection source document collection
     */
    public void setDocumentCollection(DocumentCollection documentCollection) {
        this.documentCollection = documentCollection;
    }

    /**
     * Sets the parser used to tokenize documents.
     *
     * @param parser document parser
     */
    public void setParser(Parser parser) {
        this.parser = parser;
    }

    /**
     * Sets the stopper used to remove stop words.
     *
     * @param stopper stop-word filter
     */
    public void setStopper(Stopper stopper) {
        this.stopper = stopper;
    }

    /**
     * @return the document collection this reader consumes from
     */
    public DocumentCollection getDocumentCollection() {
        return documentCollection;
    }

    /**
     * @return the document parser
     */
    public Parser getParser() {
        return parser;
    }

    /**
     * @return the stop-word filter
     */
    public Stopper getStopper() {
        return stopper;
    }

    /**
     * Consumes documents from the document collection until crawling is
     * finished and the collection is empty, parsing each document into terms,
     * filtering stop words, and merging the result into the term collection.
     */
    @Override
    public final void run() {
        System.out.println("## Start " + ID);
        // Keep going until crawling has finished AND the queue is drained.
        while (!documentCollection.isFinishedCollecting() || documentCollection.getDocumentCount() > 0) {
            Document processedDocument = documentCollection.getDocument();
            if (processedDocument == null) {
                // NOTE(review): when getDocument() yields null while collecting
                // is still in progress, this loop busy-spins; consider a
                // blocking take or brief sleep. Behavior preserved as-is.
                continue;
            }
            // Periodic progress report, once per ~1000 remaining documents.
            if (documentCollection.getDocumentCount() % 1000 == 0) {
                System.out.println("## " + ID + ", remaining : " + documentCollection.getDocumentCount() + " docs");
            }
            // Parse the document into terms, then remove stop words.
            HashMap<String, Term> documentTerms = parser.doParse(processedDocument, documentCollection.getTokenCounter());
            documentTerms = stopper.doStop(documentTerms);
            // Store each term in the shared term collection, merging counts
            // for terms that already exist. Typed iteration replaces the
            // original raw Iterator/Map.Entry with unchecked casts.
            for (Term term : documentTerms.values()) {
                if (termCollection.isExists(term)) {
                    termCollection.merge(term);
                } else {
                    termCollection.addTerm(term);
                }
            }
        }
        System.out.println("## Stop " + ID);
    }
}
