package com.saret.crawler;


import com.google.inject.Inject;
import com.google.inject.name.Named;
import com.saret.crawler.parser.GeezParser;
import com.saret.crawler.parser.TxtParser;
import com.saret.crawler.search.DownloadPage;
import com.saret.crawler.search.WebSearch;
import com.saret.crawler.type.GUrl;
import com.saret.crawler.type.GUrlImpl;
import com.saret.utils.FileLocator;
import com.saret.utils.UtfFileHandle;

import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

/**
 * Coordinates one crawl run: loads previously stored URLs, searches the web
 * for each seed phrase, downloads the resulting pages as text, persists the
 * accumulated URL set, and finally hands off to the text parser.
 *
 * User: biniam.gebremichael
 * Date: 2/3/11
 */
public class Crawler {
    private final WebSearch webSearch;
    private final File seedFile;
    private final File urlFile;
    private final DownloadPage downloadPage;
    /** Suffix appended (with a leading space) to every seed phrase; "" when not configured. */
    private final String postFix;
    /** All URLs seen so far (stored + newly found); TreeSet keeps them sorted and de-duplicated. */
    private final Set<GUrl> urls = new TreeSet<GUrl>();
    // Package-private (kept as in original) so same-package collaborators/tests can reach it.
    final TxtParser txtParser;

    /**
     * @param webSearch    search engine used to resolve seed phrases to URLs
     * @param downloadPage downloader that fetches a URL and stores it as text
     * @param txtParser    parser run over the downloaded files after the crawl
     * @param seedFile     config-relative path ('/'-separated) to the seed-phrase file
     * @param urlFile      config-relative path ('/'-separated) to the stored-URL file
     * @param postFix      optional search filter; values of fewer than 2 chars are
     *                     treated as absent, otherwise prefixed with a space so the
     *                     query reads "seed postFix"
     */
    @Inject
    public Crawler(WebSearch webSearch, DownloadPage downloadPage, GeezParser txtParser,
                   @Named("saret.crawler.seed.good") String seedFile,
                   @Named("saret.crawler.current.url") String urlFile,
                   @Named("saret.crawler.filter") String postFix) {
        this.webSearch = webSearch;
        this.downloadPage = downloadPage;
        // NOTE(review): FileLocator presumably resolves path segments under a config
        // root — confirm; the '/'-split feeds it one segment per element.
        this.seedFile = FileLocator.getConfigFile(seedFile.split("/"));
        this.urlFile = FileLocator.getConfigFile(urlFile.split("/"));
        this.postFix = postFix == null || postFix.length() < 2 ? "" : " " + postFix;
        this.txtParser = txtParser;
    }

    /**
     * Loads previously stored URLs from {@code urlFile}. Each line is expected
     * to hold "type\turl"; malformed or blank lines are skipped with a warning
     * instead of aborting the whole crawl with an ArrayIndexOutOfBoundsException.
     */
    private void loadOldUrl() throws IOException {
        List<String> urlStrings = UtfFileHandle.readFileToListOfLines(urlFile);
        for (String s : urlStrings) {
            String[] typeNurl = s.split("\t");
            if (typeNurl.length < 2) {
                System.err.println("skipping malformed url line: " + s);
                continue;
            }
            // GUrlImpl takes (url, type) per original ordering — TODO confirm.
            urls.add(new GUrlImpl(typeNurl[1], typeNurl[0]));
        }
    }

    /**
     * Runs a full crawl cycle: load known URLs, search, download, persist,
     * then start the text parser over the downloaded material.
     *
     * @throws IOException if the seed/URL files cannot be read or written
     */
    public void crawl() throws IOException {
        System.err.println("loading urlFile = " + urlFile);
        loadOldUrl();

        Set<GUrl> searchUrl = search();
        download(searchUrl);

        // Fixed: original message lacked a space ("42urls into ...").
        System.err.println("saving " + urls.size() + " urls into " + urlFile);
        storeUrl();
        txtParser.start();
    }

    /**
     * Downloads each html/pdf URL as a numbered text file ("1.txt", "2.pdf", ...).
     * URLs of any other type are skipped but still consume a counter slot,
     * preserving the original numbering behavior.
     *
     * @param searchUrl URLs to fetch
     */
    public void download(Set<GUrl> searchUrl) {
        long counter = 1;
        for (GUrl gUrl : searchUrl) {
            if ("html".equals(gUrl.getType())) {
                downloadPage.downloadAsText(gUrl.getUrl(), "" + counter + ".txt");
            } else if ("pdf".equals(gUrl.getType())) {
                downloadPage.downloadAsText(gUrl.getUrl(), "" + counter + "." + gUrl.getType());
            }
            counter++;
        }
    }

    /**
     * Searches the web once per seed phrase (each suffixed with {@link #postFix})
     * and collects the de-duplicated, sorted union of all results.
     *
     * @return all URLs found across every seed search; empty when nothing matched
     * @throws IOException if the seed file cannot be read
     */
    public Set<GUrl> search() throws IOException {
        final Set<GUrl> searchUrl = new TreeSet<GUrl>();

        List<String> seeds = UtfFileHandle.readFileToListOfLines(seedFile);
        for (String seed : seeds) {
            String searchKey = seed + postFix;
            List<GUrl> results = webSearch.search(searchKey);
            if (results == null) {
                System.err.println("hmmm!, didn't get any URL for " + searchKey);
            } else {
                searchUrl.addAll(results);
                System.err.println(results.size() + " for " + searchKey);
            }
        }

        return searchUrl;
    }

    /** Persists the accumulated URL set back to {@code urlFile}. */
    private void storeUrl() throws IOException {
        UtfFileHandle.write(urlFile, urls.toArray());
    }

}
