package arm_search_3.crawling.email;

import arm_search_3.crawling.Crawler;
import arm_search_3.data.Document;
import arm_search_3.data.DocumentCollection;
import arm_search_3.data.Seed;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Queue;

/**
 * Email crawler: a crawler that reads email files from a local directory
 * and collects the EmailDocuments it encounters.
 *
 * @author Robert Gunawan
 */
public class EmailCrawler extends Crawler {

    /**
     * The most recently issued document ID (-1 before any ID is handed out).
     * NOTE(review): left as a public field because external callers may read
     * it directly; narrowing visibility would break the existing interface.
     */
    public Integer lastDocID;
    /**
     * Queue of seeds still waiting to be processed (FIFO, which yields the
     * breadth-first traversal order).
     */
    private Queue<Seed> seeds;

    /**
     * Creates an email crawler with an empty seed queue and no issued IDs.
     */
    public EmailCrawler() {
        seeds = new LinkedList<Seed>();
        lastDocID = -1;
        documentMapping = new HashMap<Integer, String>();
    }

    /**
     * Issues a fresh document ID.
     *
     * @return the next unused document ID; thread-safe via synchronization
     */
    public synchronized Integer getNewDocID() {
        lastDocID++;
        return lastDocID;
    }

    /**
     * Crawls email files breadth-first starting from the given root seed.
     * Seeds whose name appears in {@code seedNameFilter} are skipped (their
     * documents and child seeds are not collected), but the document
     * collection obtained for that iteration is still forwarded downstream.
     *
     * @param seed root seed to start crawling from
     * @param seedNameFilter seed names to exclude from crawling
     */
    @Override
    public void crawl(Seed seed, ArrayList<String> seedNameFilter) {
        // Next crawled-count threshold at which a progress line is printed.
        // Plain int: no reason to box a purely local counter.
        int progressThreshold = 0;
        // Enqueue the root seed.
        seeds.add(seed);
        // Process seeds FIFO until none remain (breadth-first traversal).
        while (!seeds.isEmpty()) {
            // NOTE(review): getDocument() is called before the seed is
            // polled and even for filtered seeds; downstream presumably
            // expects one collection per iteration -- confirm before
            // reordering or short-circuiting this.
            DocumentCollection currCollection = this.getDocument();
            Seed currProcessedSeed = seeds.poll();
            if (!seedNameFilter.contains(currProcessedSeed.getSeedName())) {
                if (this.getCrawledCount() > progressThreshold) {
                    // Catch the threshold fully up so that a seed adding
                    // more than 1000 documents at once does not trigger a
                    // progress line on every subsequent seed.
                    while (this.getCrawledCount() > progressThreshold) {
                        progressThreshold += 1000;
                    }
                    System.out.println("## Crawled " + this.getCrawledCount() + " documents");
                }
                ArrayList<Document> addedDocuments = currProcessedSeed.getDocuments();
                this.setCrawledCount(this.getCrawledCount() + addedDocuments.size());
                // Add the crawled documents to the current collection.
                currCollection.addDocuments(addedDocuments);
                // Enqueue the child seeds discovered from the current seed.
                seeds.addAll(currProcessedSeed.getNextSeeds());
            }
            // Forwarded even when the seed was filtered out (collection may
            // be empty in that case).
            this.addDocument(currCollection);
        }
        this.allFinish();
    }
}
