package Data;
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.url.WebURL;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.*;
import java.util.regex.Pattern;


public class LocalDataCollectorCrawler extends WebCrawler {

    /** Static-resource and media URL suffixes that are never crawled. */
    private final static Pattern FILTERS = Pattern.compile(".*(\\.(css|js|bmp|gif|jpe?g" + "|png|tiff?|mid|mp2|mp3|mp4"
            + "|wav|avi|mov|mpeg|ram|m4v|pdf" + "|rm|smil|wmv|swf|wma|zip|rar|gz))$");

    protected WebURL root;
    private CrawlStat crawlStat;                 // per-crawler statistics, created by configure()
    private Properties myProps, restrictToURL;   // loaded from Settings.CONFIGURATION / Settings.WEB_SITES
    private Pattern FILTER_TYPES, FILTER_WORDS;  // optional extra URL filters read from the config file
    private List<String> restrictURList;         // "restrictN" URL fragments read from the web-sites file
    private Set<WebURL> webURLSet;               // every URL actually visited; reported in onBeforeExit()
    private String[] myCrawlDomains;             // domain prefixes this crawler is allowed to follow

    public LocalDataCollectorCrawler() {
    }

    /**
     * Called once when the crawler thread starts: receives the allowed domain
     * prefixes from the controller and resets the visited-URL set.
     */
    @Override
    public void onStart() {
        myCrawlDomains = (String[]) myController.getCustomData();
        webURLSet = new HashSet<WebURL>();
    }

    /**
     * Loads the optional URL filter patterns from {@code Settings.CONFIGURATION}
     * and the {@code restrict1}, {@code restrict2}, ... URL list from
     * {@code Settings.WEB_SITES}. Loading is best-effort: a missing or
     * unreadable file is reported and simply leaves the corresponding
     * filters/list empty.
     */
    public void configure() {
        this.crawlStat = new CrawlStat();
        this.myProps = new Properties();
        // try-with-resources: the original leaked the FileInputStream.
        try (FileInputStream propFile = new FileInputStream(Settings.CONFIGURATION)) {
            myProps.load(propFile);
        } catch (IOException io_ex) {
            // FileNotFoundException is an IOException; best-effort load, so just report it
            // (the original silently discarded the message).
            System.err.println("Could not load " + Settings.CONFIGURATION + ": " + io_ex.getMessage());
        }
        if (myProps.getProperty("regexUrlFileType") != null) {
            FILTER_TYPES = Pattern.compile(myProps.getProperty("regexUrlFileType"));
        }
        if (myProps.getProperty("regexUrlWords") != null) {
            FILTER_WORDS = Pattern.compile(myProps.getProperty("regexUrlWords"));
        }

        this.restrictToURL = new Properties();
        try (FileInputStream propInStream = new FileInputStream(Settings.WEB_SITES)) {
            restrictToURL.load(propInStream);
        } catch (IOException io_ex) {
            System.err.println("Could not load " + Settings.WEB_SITES + ": " + io_ex.getMessage());
        }

        // Collect restrict1, restrict2, ... until the first missing key.
        // BUG FIX: the original loop never incremented i and waited for an
        // exception that Properties.getProperty never throws (it returns null
        // for a missing key), so it spun forever adding the same entry.
        this.restrictURList = new ArrayList<String>();
        for (int i = 1; ; i++) {
            String restricted = restrictToURL.getProperty("restrict" + i);
            if (restricted == null) {
                break;
            }
            restrictURList.add(restricted);
        }
    }

    /* (non-Javadoc)
     * @see edu.uci.ics.crawler4j.crawler.WebCrawler#shouldVisit(edu.uci.ics.crawler4j.url.WebURL)
     * Overridden to ignore any link that does not belong to the original
     * website, and to skip js, css, image and media resources.
     */
    @Override
    public boolean shouldVisit(WebURL url) {
        String href = url.getURL().toLowerCase();
        // Reject static resources / media by extension.
        if (FILTERS.matcher(href).matches()) {
            return false;
        }
        // Only follow links inside one of the configured crawl domains.
        for (String crawlDomain : myCrawlDomains) {
            if (href.startsWith(crawlDomain)) {
                return true;
            }
        }
        return false;
    }

    /* (non-Javadoc)
     * @see edu.uci.ics.crawler4j.crawler.WebCrawler#visit(edu.uci.ics.crawler4j.crawler.Page)
     * Records each visited page's URL so the full list can be reported when
     * the crawler shuts down.
     */
    @Override
    public void visit(Page page) {
        System.out.println("Visiting: " + page.getWebURL().getURL());
        this.webURLSet.add(page.getWebURL());
    }

    // Placeholder: intended to dump this crawler's statistics after every
    // 50 processed pages (not yet implemented).
    private void dumpMyData() {
    }

    /**
     * Prints every URL this crawler visited before the thread exits.
     */
    @Override
    public void onBeforeExit() {
        super.onBeforeExit();
        for (WebURL web : webURLSet) {
            System.out.println("Visitou " + web.getURL());
        }
    }
}