/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package net.cjb.malacma.rssfeed.crawler;

//import java.io.PrintWriter;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Locale;

import websphinx.Link;
import websphinx.Page;

/**
 * Crawler that follows only links whose URLs look feed-related
 * (e.g. contain "rss", "xml", "php", or "feed").
 *
 * @author malacma
 */
public class FeedCrawler extends AgentCrawler {

    private static final long serialVersionUID = 9015164947202781853L;

    /** Lower-case URL substrings that mark a link as a candidate feed. */
    private static final String[] URL_PATTERN = {"rss", "xml", "php", "feed"};

    /**
     * Callback for each page the crawler fetches. Currently a stub: the
     * original body only read {@code page.getURL()} into an unused local.
     *
     * @param page the fetched page to process
     */
    protected void doVisit(Page page) {
        // TODO: extract/process the feed content of the visited page.
        // NOTE(review): assumes AgentCrawler invokes doVisit per page — confirm in base class.
    }

    /**
     * Decides whether the crawler should follow {@code link}: {@code true}
     * when the link's URL contains any {@link #URL_PATTERN} substring,
     * compared case-insensitively.
     *
     * @param link candidate link discovered on a visited page
     * @return {@code true} if the URL looks feed-related
     */
    @Override
    public boolean shouldVisit(Link link) {
        // Fold case once, outside the loop. Locale.ROOT keeps the comparison
        // locale-independent (avoids e.g. the Turkish dotless-i surprise).
        String url = link.getURL().toString().toLowerCase(Locale.ROOT);
        for (String pattern : URL_PATTERN) {
            if (url.contains(pattern)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Entry point: crawls {@code http://www.surfline.com/rss/} to depth 5
     * on a background thread.
     *
     * @param argv command line args (unused)
     * @throws Exception if the seed URL is malformed or crawler setup fails
     *         ({@code MalformedURLException} was redundant alongside
     *         {@code Exception} in the original clause)
     */
    public static void main(String[] argv) throws Exception {
        System.out.println("Testing Websphinx. . .");

        // Build our crawler and seed it with a single root link.
        AgentCrawler crawler = new FeedCrawler();
        Link link = new Link("http://www.surfline.com/rss/");
        crawler.setRoot(link);
        crawler.setMaxDepth(5);

        // Start running the crawler!
        System.out.println("Starting crawler. . .");

        // Run asynchronously; the non-daemon thread keeps the JVM alive
        // until the crawl completes, so no explicit join() is required.
        new Thread(crawler).start();
    }
}
