package com.johnny.xia.webcollector.base;

import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;

public class QylCrawler extends BreadthCrawler {

	/** Seed page the crawl starts from; never reassigned, so kept as a constant. */
	private static final String SEED_URL = "http://www.qylbbs5.com";

	/**
	 * Creates a crawler rooted at {@link #SEED_URL}.
	 *
	 * @param crawlPath
	 *            path of the directory which maintains information of this
	 *            crawler
	 * @param autoParse
	 *            if true, BreadthCrawler will auto-extract links which match
	 *            the regex rules from each fetched page
	 */
	public QylCrawler(String crawlPath, boolean autoParse) {
		super(crawlPath, autoParse);
		/* start page */
		this.addSeed(SEED_URL);
		/* do not fetch jpg|png|gif */
		this.addRegex("-.*\\.(jpg|png|gif).*");
		/* do not fetch urls containing # */
		this.addRegex("-.*#.*");
	}

	/**
	 * Callback invoked for every fetched page. Prints the absolute URL of each
	 * {@code <img>} tag whose source lives under the page's own URL, then
	 * prints the page URL itself.
	 *
	 * @param page
	 *            the fetched page (already parsed into a jsoup Document)
	 * @param next
	 *            collector for links to crawl next; unused here because
	 *            autoParse already follows links matching the regex rules
	 */
	@Override
	public void visit(Page page, CrawlDatums next) {
		// Local name differs from the old field to avoid shadowing confusion.
		String pageUrl = page.url();
		Document doc = page.doc();
		// Iterate over all <img> tags; the "abs:" prefix makes jsoup resolve
		// each src attribute against the page's base URI into an absolute URL.
		Elements imgs = doc.getElementsByTag("img");
		for (Element img : imgs) {
			String imgSrc = img.attr("abs:src");
			// attr() returns "" (never null) when the attribute is absent or
			// unresolvable, so skip empty values explicitly. Only report
			// images hosted under this page's URL.
			if (!imgSrc.isEmpty() && imgSrc.startsWith(pageUrl)) {
				System.out.println(imgSrc);
			}
		}

		System.out.println("URL:\n" + pageUrl);

		/*
		 * If you want to add urls to crawl, add them to next. WebCollector
		 * automatically filters links that have been fetched before. If
		 * autoParse is true and a link added to next does not match the regex
		 * rules, that link will also be filtered.
		 */
	}

	/**
	 * Entry point: crawls the seed site single-threaded, at most 100 pages per
	 * depth level, to a maximum depth of 100.
	 *
	 * @param args
	 *            unused command-line arguments
	 * @throws Exception
	 *             if the crawl fails to start or run
	 */
	public static void main(String[] args) throws Exception {
		QylCrawler crawler = new QylCrawler("crawl", true);

		crawler.setThreads(1);
		crawler.setTopN(100);
		// crawler.setResumable(true);
		/* start crawl with a maximum depth of 100 */
		crawler.start(100);
	}

}
