/*
 *  Copyright 2009 Lucas Nazário dos Santos
 *  
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *  
 *      http://www.apache.org/licenses/LICENSE-2.0
 *  
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */
package net.sourceforge.retriever;

import java.net.URL;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;

import net.sourceforge.retriever.analyzer.Analyzer;
import net.sourceforge.retriever.analyzer.NullAnalyzer;
import net.sourceforge.retriever.executor.Executor;
import net.sourceforge.retriever.executor.ThreadExecutor;
import net.sourceforge.retriever.feedback.Feedback;
import net.sourceforge.retriever.feedback.NullFeedback;
import net.sourceforge.retriever.fetcher.dns.DNSResolver;
import net.sourceforge.retriever.fetcher.dns.JavaDNSResolver;
import net.sourceforge.retriever.filter.Filter;
import net.sourceforge.retriever.filter.NullFilter;
import net.sourceforge.retriever.frontier.Frontier;
import net.sourceforge.retriever.frontier.FrontierUrl;
import net.sourceforge.retriever.frontier.OneTimeOnlyFrontier;
import net.sourceforge.retriever.frontier.PoliteFrontier;

/**
 * <p>
 * This class is the entry-point to the whole crawling process.
 * </p>
 * 
 * <p>
 *  Its usage is very simple, as shown below.
 * </p>
 * 
 * <p>
 * <code>
 * final Retriever retriever = new Retriever();<br>
 * <br>
 * retriever.addSeed(new URL("http://www.yahoo.com/"));<br>
 * retriever.addSeed(new URL("file://c:/"));<br>
 * <br>
 * retriever.addAnalyzer(new Analyzer() {<br>
 *     public void analyze(final Resource resource) {<br>
 *         System.out.println(resource.getURL().toExternalForm());<br>
 *     }<br>
 * });<br>
 * <br>
 * retriever.start();
 * </code>
 * </p>
 * 
 * <p>
 * The crawler has many more powerful and interesting features. To explore them, have a look at the
 * javadocs and at the examples in the sample folder.
 * </p>
 */
public class Retriever {

        static {
                // If entries live forever, the cache mechanism (java.net.InetAddress$Cache)
                // presents a deadlock behavior when Retriever is started with hundreds of threads.
                System.setProperty("networkaddress.cache.ttl", "10");

                System.setProperty("java.protocol.handler.pkgs", "sun.net.www.protocol|jcifs");

                System.setProperty("http.agent", "Retriever");
        }

        // Start URLs; only consumed when start() runs. Never reassigned, hence final.
        private final List<URL> seeds = new ArrayList<URL>();

        // Receives informational messages about the crawl; assigned once in the constructor.
        private final Feedback feedback;

        private Filter filter = new NullFilter();

        private Analyzer analyzer = new NullAnalyzer();

        private Executor executor = new ThreadExecutor(50, 50, 60, TimeUnit.SECONDS, 1000);

        private Frontier frontier = new OneTimeOnlyFrontier(new PoliteFrontier());

        // volatile: stop() is expected to be called from a thread other than the one
        // looping inside start(), so the write must be visible across threads.
        private volatile boolean running;

        // CopyOnWriteArrayList so that a listener may add or remove listeners from
        // within onEvent(...) without causing a ConcurrentModificationException.
        private final List<RetrieverListener> listeners = new CopyOnWriteArrayList<RetrieverListener>();

        private DNSResolver dnsResolver = new JavaDNSResolver();



        /**
         * Creates a crawler object, which is the entry point to the crawling process.
         */
        public Retriever() {
                this(new NullFeedback());
        }



        /**
         * <p>
         * Creates a crawler object, which is the entry point to the crawling process.
         * </p>
         * 
         * <p>
         * It also allows users to pass in a <code>Feedback</code> implementation, so they
         * can be aware of what is happening during the crawling process.
         * </p>
         * 
         * <p>
         * <b>ATTENTION!</b> This feature is still under tests.
         * </p>
         * 
         * @param feedback Object that gives users feedback about the crawling process.
         * @see net.sourceforge.retriever.feedback.Feedback
         */
        public Retriever(final Feedback feedback) {
                this.feedback = feedback;
        }



        /**
         * <p>
         * Plugs-in a filter module for resources.
         * </p>
         * 
         * <p>
         * If you want to remove the filter, please do the following:<br>
         * <code>retriever.setFilter(new NullFilter());</code>
         * </p>
         * 
         * @param filter To filter resources during the crawling process.
         * @see net.sourceforge.retriever.filter.Filter
         */
        public void setFilter(final Filter filter) {
                this.filter = filter;
        }



        /**
         * Returns the <code>Frontier</code> currently in use, i.e. the component that
         * holds the list of URLs still to be crawled.
         * 
         * @return The <code>Frontier</code> component in use by this crawler.
         * @see net.sourceforge.retriever.frontier.Frontier
         */
        public Frontier getFrontier() {
                return this.frontier;
        }



        /**
         * <p>
         * The <code>Frontier</code> is the component responsible for holding the list of URLs to be
         * crawled.
         * </p>
         * 
         * <p>
         * By default the crawler uses a <code>OneTimeOnlyFrontier</code> wrapping a
         * <code>PoliteFrontier</code>, so each URL is visited at most once.
         * </p>
         * 
         * @param frontier The <code>Frontier</code> component.
         * @see net.sourceforge.retriever.frontier.Frontier
         * @see net.sourceforge.retriever.frontier.PoliteFrontier
         */
        public void setFrontier(final Frontier frontier) {
                this.frontier = frontier;
        }



        /**
         * <p>
         * This method plugs the module responsible for analyzing crawled resources.
         * </p>
         * 
         * <p>
         * By analyzing we mean any after-crawling process, like the persistence of crawled data
         * into indexes, databases, and so forth.
         * </p>
         * 
         * <p>
         * If you want to remove the analyzer, please do the following:<br>
         * <code>retriever.setAnalyzer(new NullAnalyzer());</code>
         * </p>
         * 
         * <p>
         * Users must provide their own <code>Analyzer</code> implementation.
         * </p>
         * 
         * @param analyzer The module responsible for analyzing resources after they are crawled.
         * @see net.sourceforge.retriever.analyzer.Analyzer
         */
        public void setAnalyzer(final Analyzer analyzer) {
                this.analyzer = analyzer;
        }



        /**
         * Plugs-in the component used to resolve a URL into an IP address before it is
         * enqueued on the frontier. By default a <code>JavaDNSResolver</code> is used.
         * 
         * @param dnsResolver The DNS resolution component.
         * @see net.sourceforge.retriever.fetcher.dns.DNSResolver
         */
        public void setDNSResolver(final DNSResolver dnsResolver) {
                this.dnsResolver = dnsResolver;
        }



        /**
         * <p>
         * The crawler is a multithreaded application, meaning that every URL is fetched and
         * analyzed by a different thread in the system.
         * </p>
         * 
         * <p>
         * To accomplish that, a pool of threads is used. With this pool it's possible for users to
         * specify some variables, like the maximum allowed number of simultaneous threads and for
         * how long each thread will be alive after becoming idle.
         * </p>
         * 
         * <p>
         * The standard pool implementation starts with 50 threads and grows up to 50, queuing a
         * maximum of 1000 requests after reaching that limit. Also, each thread lives for as long
         * as 60 seconds after becoming idle.
         * </p>
         * 
         * @param executor The thread-pool abstraction used to fetch and analyze resources.
         * @see net.sourceforge.retriever.executor.Executor
         */
        public void setExecutor(final Executor executor) {
                this.executor = executor;
        }



        /**
         * <p>
         * Adds a seed to the crawler.
         * </p>
         * 
         * <p>
         * If you want to add more URLs while the crawler is executing, please refer to the
         * <code>addResourceWhileCrawling(URL)</code> method of this class.
         * </p>
         * 
         * @param seed A seed.
         */
        public void addSeed(final URL seed) {
                this.seeds.add(seed);
        }



        /**
         * If the crawler is currently executing, this method gives users the chance of adding more
         * resources to be crawled.
         * 
         * @param resource Adds one more resource to be crawled during the crawler's execution.
         */
        public void addResourceWhileCrawling(final URL resource) {
                final String url = resource.toExternalForm();

                // The IP is resolved eagerly so the frontier can group URLs by host
                // (politeness) without each worker re-resolving DNS.
                final FrontierUrl frontierUrl = new FrontierUrl();
                frontierUrl.setUrl(url);
                frontierUrl.setIp(this.dnsResolver.getIP(url));

                this.frontier.enqueue(frontierUrl);
        }



        /**
         * <p>
         * Starts the crawling process. This call blocks until the frontier is exhausted
         * (and no thread is still executing) or until {@link #stop()} is invoked from
         * another thread.
         * </p>
         */
        public void start() {
                this.notifyListeners(Event.STARTED);

                this.feedback.info(null, "Crawler started on " + new Date().toString());

                this.running = true;

                this.frontier.reset();

                // TODO Give a better solution. With the current one, the pool is created at least
                // two times.
                this.executor = this.executor.create();

                for (final URL seed : this.seeds) {
                        this.addResourceWhileCrawling(seed);
                }

                // Loop while there is work pending; the lock lets workers wake this loop
                // up as soon as new URLs are enqueued on an empty frontier.
                final Object emptyFrontierLock = new Object();
                while (this.running && (this.frontier.hasURLs() || this.executor.isExecuting())) {
                        this.waitIfFrontierIsEmpty(emptyFrontierLock);
                        this.executor
                                        .execute(this.frontier, this.filter, this.analyzer, this.feedback, emptyFrontierLock,
                                                        this.dnsResolver);
                }

                this.executor.shutdown();
                this.feedback.info(null, "Crawler stopped on " + new Date().toString());
                this.running = false;
                this.notifyListeners(Event.FINISHED);
        }



        /**
         * Stops the crawling process. Safe to call from a thread other than the one
         * blocked in {@link #start()}; the crawl loop notices the change on its next
         * iteration.
         */
        public void stop() {
                this.notifyListeners(Event.STOPPING);
                this.running = false;
        }



        /**
         * Tells users whether the crawler is running or not.
         * 
         * @return Boolean flag indicating if the crawler is running or not.
         */
        public boolean isRunning() {
                return this.running;
        }



        /**
         * <p>
         * Listeners allow users to act upon crawler events.
         * </p>
         * 
         * <p>
         * It's possible, for instance, to do some cleanup after the crawl ends.
         * </p>
         * 
         * @param listener To listen for crawler events.
         */
        public void addListener(final RetrieverListener listener) {
                this.listeners.add(listener);
        }



        /**
         * Removes a listener.
         * 
         * @param listener The listener to be removed.
         */
        public void removeListener(final RetrieverListener listener) {
                this.listeners.remove(listener);
        }



        /**
         * Removes all listeners.
         */
        public void clearListeners() {
                this.listeners.clear();
        }



        // Broadcasts the given event to every registered listener.
        private void notifyListeners(final Event event) {
                for (final RetrieverListener listener : this.listeners) {
                        listener.onEvent(event, this);
                }
        }



        // Blocks (up to 5 seconds) when the frontier has no URLs, giving worker
        // threads a chance to enqueue newly-discovered links and notify this lock.
        private void waitIfFrontierIsEmpty(final Object emptyFrontierLock) {
                if (!this.frontier.hasURLs()) {
                        synchronized (emptyFrontierLock) {
                                try {
                                        emptyFrontierLock.wait(5000);
                                } catch (final InterruptedException e) {
                                        // Restore the interrupt flag instead of swallowing the
                                        // exception, so callers can observe the interruption.
                                        Thread.currentThread().interrupt();
                                }
                        }
                }
        }
}