/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package utfpr.cp.crawler;

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.parser.TextParseData;
import edu.uci.ics.crawler4j.url.URLCanonicalizer;
import edu.uci.ics.crawler4j.url.WebURL;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Date;
import java.util.HashSet;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import javax.persistence.EntityManager;
import org.apache.http.Header;
import org.apache.http.HttpStatus;
import utfpr.cp.model.DAO;
import utfpr.cp.model.ResourceData;
import utfpr.cp.model.ResourceDataDAO;
import utfpr.cp.model.URLData;
import utfpr.cp.model.URLDataDAO;

/**
 *
 * @author COINF-PROFESSOR-07
 */
public class SemanticCrawler extends WebCrawler {

    /** Wall-clock time of the last successful save; volatile for cross-thread visibility. */
    private static volatile long lastTime = System.currentTimeMillis();
    /**
     * Hands out a sequential id per crawler instance. Atomic because crawler4j
     * constructs one crawler per worker thread.
     */
    private static final AtomicInteger counter = new AtomicInteger();
    /** Matches URLs ending in binary/media extensions that should never be downloaded. */
    private final static Pattern FILTERS = Pattern.compile(".*(\\.(bmp|gif|jpe?g"
            + "|png|tiff?|mid|mp2|mp3|mp4"
            + "|wav|avi|mov|mpeg|ram|m4v|pdf"
            + "|rm|wmv|swf|wma|zip|rar|gz|"
            + "doc|docx|xls|ppt|pptx|rtf))$");

    /**
     * Domain whitelist: when non-empty, only URLs whose domain is listed are visited.
     * NOTE(review): plain HashSet read by crawler threads — assumes addURL/clearURL
     * run before the crawl starts; confirm against the controller setup code.
     */
    private static final HashSet<String> domains = new HashSet<>();
    /** Consecutive already-stored pages seen; shared by all crawler threads, hence atomic. */
    private static final AtomicInteger blockedpages = new AtomicInteger();
    /**
     * Debug timestamp formatter. DateTimeFormatter is immutable and thread-safe;
     * the previous shared SimpleDateFormat was not and could garble output when
     * several crawler threads logged concurrently.
     */
    private static final DateTimeFormatter dateFormat = DateTimeFormatter.ofPattern("dd/MM HH:mm:ss");

    /** Empties the domain whitelist so every domain becomes crawlable. */
    public static void clearURL() {
        domains.clear();
    }

    /**
     * Adds the domain of the given URL to the whitelist.
     * URLs that fail canonicalization are silently ignored.
     *
     * @param url an absolute URL whose domain should be allowed
     */
    public static void addURL(String url) {
        WebURL u = new WebURL();
        String canonicalUrl = URLCanonicalizer.getCanonicalURL(url);
        if (canonicalUrl != null) {
            u.setURL(canonicalUrl);
            domains.add(u.getDomain());
        }
    }

    int thread;                  // sequential id of this crawler instance, used in log lines
    CrawlerStats stats;          // per-thread statistics, registered globally in the constructor
    EntityManager em;            // JPA entity manager; rebuilt after a failed save
    ResourceDataDAO resourceDAO;
    URLDataDAO urlDAO;

    public SemanticCrawler() {
        em = DAO.getEntity();
        resourceDAO = new ResourceDataDAO(em);
        urlDAO = new URLDataDAO(em);
        thread = counter.incrementAndGet();
        blockedpages.set(0);
        stats = new CrawlerStats(thread);
        CrawlerStats.register(stats);
    }

    /**
     * Decides whether the given URL should be crawled: its domain must be
     * whitelisted (when the whitelist is non-empty), it must not already be
     * stored, and it must not look like a binary/media file.
     *
     * @param url the candidate URL
     * @return true if the URL should be downloaded
     */
    @Override
    public boolean shouldVisit(WebURL url) {
        stats.addTotalProcessedURL();
        if (!domains.isEmpty() && !domains.contains(url.getDomain())) {
            // Domain filtering is active and this URL's domain is not whitelisted.
            return false;
        }

        ResourceData uri = resourceDAO.getByUrl(url.getURL());
        if (uri.exist()) {
            // After 10 consecutive already-stored pages, allow one re-download
            // so its outgoing links can still be discovered.
            if (blockedpages.get() > 9) {
                return true;
            }
            blockedpages.incrementAndGet();
            return false;
        }

        String href = url.getURL().toLowerCase();
        return !FILTERS.matcher(href).matches();
    }

    /** Prints a timestamped, thread-tagged debug line to stdout. */
    public void debug(String msg) {
        System.out.println(dateFormat.format(LocalDateTime.now()) + " [" + thread + "] " + msg);
    }

    /**
     * Processes a fetched page: persists its content (HTML or plain text)
     * together with its response headers and outgoing links. Binary pages,
     * over-long URLs and already-stored URLs are skipped.
     *
     * @param page the downloaded page
     */
    @Override
    public void visit(Page page) {
        stats.addTotalVisitedPages();
        String url = page.getWebURL().getURL();
        if (url.length() > 1000) {
            // Presumably matches the URL column width in the database — TODO confirm schema.
            debug("Skipping URL too long: " + url);
            return;
        }

        blockedpages.set(0);
        URLData ownerURL = urlDAO.getOrCreate(page.getWebURL());

        ResourceData request = resourceDAO.getByUrl(url);
        if (request.exist()) {
            debug("Skipping URL: " + url + " already downloaded: " + request.getId());
            return;
        }

        if (page.getParseData() instanceof HtmlParseData) {
            HtmlParseData htmlParseData = (HtmlParseData) page.getParseData();
            HashSet<URLData> links = new HashSet<>();
            for (WebURL link : htmlParseData.getOutgoingUrls()) {
                if (link.getURL().length() > 999) continue; // same URL-length cap as above
                links.add(urlDAO.getOrCreate(link));
            }

            request = new ResourceData(ownerURL, htmlParseData.getHtml(), links);
        } else if (page.getParseData() instanceof TextParseData) {
            TextParseData txt = (TextParseData) page.getParseData();
            request = new ResourceData(ownerURL, txt.getTextContent());
        } else {
            debug("Skipped binary file to URL: " + url + "...");
            return;
        }

        Header[] responseHeaders = page.getFetchResponseHeaders();
        if (responseHeaders != null) {
            for (Header header : responseHeaders) {
                request.addHeader(header.getName(), header.getValue());
            }
        }

        if (DAO.save(request, em)) {
            debug("Saving OK: " + url + "\n        " + request);
            stats.addTotalSavedPages();

            // Read the clock once so the reported latency and the stored
            // timestamp cannot drift apart between two calls.
            long now = System.currentTimeMillis();
            stats.setLastTime(now - lastTime);
            lastTime = now;
        } else {
            // A failed save can leave the persistence context unusable:
            // rebuild the EntityManager and both DAOs from scratch.
            em.close();
            em = DAO.getEntity();
            resourceDAO = new ResourceDataDAO(em);
            urlDAO = new URLDataDAO(em);
            stats.addDatabaseErrors();
            debug("FAIL to save: " + url + "\n");
        }
    }

    /** @return this crawler's statistics object, collected by the controller. */
    @Override
    public Object getMyLocalData() {
        return stats;
    }

    /** Counts every non-200 HTTP response as an error page. */
    @Override
    protected void handlePageStatusCode(WebURL webUrl, int statusCode, String statusDescription) {
        if (statusCode != HttpStatus.SC_OK) {
            stats.addErrorPages();
        }
    }

}
