package com.jijs.main;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.htmlunit.HtmlUnitDriver;

import com.jijs.crawldb.DBManager;
import com.jijs.crawler.Crawler;
import com.jijs.fetcher.Executor;
import com.jijs.model.CrawlDatum;
import com.jijs.model.CrawlDatums;
import com.jijs.plugin.berkeley.BerkeleyDBManager;

/**
 * Some crawl tasks need data that is loaded into the page by JavaScript (e.g.
 * content fetched via AJAX). One solution is to crawl with Selenium, a virtual
 * (headless) browser. This demo shows how to use WebCollector together with
 * Selenium's HtmlUnitDriver to crawl JavaScript-generated content.
 *
 * @author jijs
 */
public class DemoSelenium {

	static {
		// Silence HtmlUnit's very verbose logging used by Selenium.
		Logger logger = Logger.getLogger("com.gargoylesoftware.htmlunit");
		logger.setLevel(Level.OFF);
	}

	public static void main(String[] args) throws Exception {
		Executor executor = new Executor() {
			/**
			 * Fetches the page in a JavaScript-enabled headless browser and
			 * prints the backlink count that the page's scripts render into
			 * the {@code span#outlink1} element.
			 */
			@Override
			public void execute(CrawlDatum datum, CrawlDatums next)
					throws Exception {
				HtmlUnitDriver driver = new HtmlUnitDriver();
				try {
					// JS must be enabled, otherwise AJAX-populated elements stay empty.
					driver.setJavascriptEnabled(true);
					driver.get(datum.getUrl());
					WebElement element = driver
							.findElementByCssSelector("span#outlink1");
					System.out.println("反链数:" + element.getText());
				} finally {
					// Always release the browser session, even when the fetch
					// or element lookup throws; otherwise each execute() call
					// leaks an HtmlUnit instance.
					driver.quit();
				}
			}
		};

		// Create a Berkeley-DB-backed DBManager to persist crawl state.
		DBManager manager = new BerkeleyDBManager("crawl");
		// A Crawler is assembled from a DBManager and an Executor.
		Crawler crawler = new Crawler(manager, executor);
		crawler.addSeed("http://seo.chinaz.com/?host=www.tuicool.com");
		// Crawl a single depth level starting from the seed.
		crawler.start(1);
	}

}