package edu.rutgers.crawler;

import java.util.ArrayList;
import java.util.Random;

import org.htmlparser.Parser;
import org.htmlparser.util.NodeList;
import org.htmlparser.util.ParserException;

/**
 * Crawls the web: iteratively follows the links.
 * 
 * @author dashali
 */
public class Crawler {

	/** Maximum number of links that may pass to the next crawling round. */
	private static final int MAX_SEEDS = 1000;

	/**
	 * Runs the crawl starting from a hard-coded seed URL: each round
	 * processes the current seed list and filters the discovered links
	 * into the seeds for the next round. Stops when no seeds remain.
	 */
	public void run() {
		ArrayList<String> seeds = new ArrayList<String>();
		seeds.add("http://google.com");

		while (seeds != null && !seeds.isEmpty()) {
			ArrayList<String> nextSeeds = processList(seeds);
			seeds = filter(nextSeeds);
		}
	}

	/**
	 * Processes each link in the list. First we collect links for
	 * crawling, second we search for ads (ad search not yet implemented).
	 * A URL that fails to parse is logged and skipped, so a single bad
	 * page no longer aborts the whole round and discards links already
	 * collected from earlier pages.
	 *
	 * @param seeds URLs to fetch and parse
	 * @return all links discovered on the pages that parsed successfully
	 */
	public ArrayList<String> processList(ArrayList<String> seeds) {
		ArrayList<String> nextSeeds = new ArrayList<String>();
		for (String url : seeds) {
			// try/catch is per-URL: one unparsable page must not stop
			// processing of the remaining seeds.
			try {
				Parser parser = new Parser(url);

				// get links
				nextSeeds.addAll(LinkFinder.find(parser));
				// get ads
				NodeList list = parser.parse(null);
				// TODO: implement ad search
			} catch (ParserException e) {
				// include the failing URL so the log line is actionable
				System.out.println("Couldn't parse URL " + url);
				e.printStackTrace();
			}
		}
		return nextSeeds;
	}

	/**
	 * Filters the links that pass to the next round. At the moment we
	 * only bound the size of the list: if it reaches {@code MAX_SEEDS},
	 * a uniformly random subset of {@code MAX_SEEDS} links is sampled
	 * without replacement. Note that in the over-size case the input
	 * list is mutated (the selected elements are removed from it).
	 *
	 * @param seeds candidate links for the next round
	 * @return at most {@code MAX_SEEDS} links
	 */
	public ArrayList<String> filter(ArrayList<String> seeds) {
		if (seeds.size() < MAX_SEEDS) {
			return seeds;
		}
		ArrayList<String> result = new ArrayList<String>(MAX_SEEDS);
		Random rdm = new Random();
		for (int i = 0; i < MAX_SEEDS; i++) {
			// Bound must be seeds.size(), NOT size() + 1: nextInt(bound)
			// is exclusive of bound, so the original size() + 1 could
			// yield an out-of-range index and throw
			// IndexOutOfBoundsException.
			int pos = rdm.nextInt(seeds.size());
			// remove() returns the removed element — one call instead of
			// the original get() + remove() pair.
			result.add(seeds.remove(pos));
		}
		return result;
	}
}
