package com.wx.hd;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;

import org.apache.log4j.Logger;

import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.crawler.Page;
import edu.uci.ics.crawler4j.crawler.WebCrawler;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.parser.HtmlParseData;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;
import edu.uci.ics.crawler4j.url.WebURL;

public class MyCrawler extends WebCrawler {
	private static Logger logger = Logger.getLogger(MyCrawler.class);
//        Pattern filters = Pattern.compile(".*(\\.(css|js|bmp|gif|jpe?g"
//                + "|png|tiff?|mid|mp2|mp3|mp4"
//                + "|wav|avi|mov|mpeg|ram|m4v|pdf"
//                + "|rm|smil|wmv|swf|wma|zip|rar|gz))$");
	//默认下载图片
	public static Pattern filters = Pattern.compile(".*(\\.(gif|jpe?g|png))$");

	public MyCrawler() {
	}

	public boolean shouldVisit(WebURL url) {
		String href = url.getURL().toLowerCase();
		if (filters.matcher(href).matches()) {
			return false;
		}
		if (href.startsWith("http://www.ics.uci.edu/")) {
			return true;
		}
		return false;
	}

	public void visit(Page page) {
		 //int docid = page.getWebURL().getDocid();
		 //String url = page.getWebURL().getURL();
		if (page.getParseData() instanceof HtmlParseData) {
			HtmlParseData htmlParseData = (HtmlParseData) page.getParseData();
			//String text = htmlParseData.getText();
			//String html = htmlParseData.getHtml();
			Set<WebURL> links = htmlParseData.getOutgoingUrls();
			for (WebURL webURL : links) {
				String urls = webURL.getURL().toLowerCase();
				if (filters.matcher(urls).matches()) {
					logger.debug(" ====== 下载图片的网络地址：" + urls);
					ImageDownload.downloadPicture(urls);
//					try {
//						Thread.sleep(1000);
//					} catch (InterruptedException e) {
//						e.printStackTrace();
//					}
				}
			}
		}

	}
	public static void uplaodImages(String args,String pcs) throws Exception {
		logger.debug(" ====== 撸图的网站地址 ：" + args);
		Properties props = new Properties();
		try {
			System.out.println("加载配置文件");
			props.load(MyCrawler.class.getClassLoader().getResourceAsStream("log4j.properties"));
		} catch (IOException e) {// TODO Auto-generated catch block
			e.printStackTrace();
		}
		// 爬虫状态存储文件夹，可以从这里边读取数据，以边恢复之前的爬取状态
		String crawlStorageFolder = "D://data//crawl//root";
		// 爬虫数量，也就是线程数，一般不超过CPU线程数
		int numberOfCrawlers = Integer.valueOf(pcs);
		// 爬虫配置
		CrawlConfig config = new CrawlConfig();
		config.setCrawlStorageFolder(crawlStorageFolder);
		PageFetcher pageFetcher = new PageFetcher(config);
		RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
		RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
		CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);
		// 要爬取的起始地址
		// controller.addSeed("http://www.ics.uci.edu/~lopes/");
		// controller.addSeed("http://www.ics.uci.edu/~welling/");
		controller.addSeed(args);
		// 启动
		controller.start(MyCrawler.class, numberOfCrawlers);
	}
	public static void main(String[] args) throws Exception {
		demo.filepath = "D://picture//";
		Properties props = new Properties();
		try {
			System.out.println("加载配置文件");
			props.load(MyCrawler.class.getClassLoader().getResourceAsStream("log4j.properties"));
		} catch (IOException e) {// TODO Auto-generated catch block
			e.printStackTrace();
		}
		// 爬虫状态存储文件夹，可以从这里边读取数据，以边恢复之前的爬取状态
		String crawlStorageFolder = "D://data//crawl//root";
		// 爬虫数量，也就是线程数，一般不超过CPU线程数
		int numberOfCrawlers = 1;
		// 爬虫配置
		CrawlConfig config = new CrawlConfig();
		config.setCrawlStorageFolder(crawlStorageFolder);

		/*
		 * Instantiate the controller for this crawl.
		 */
		PageFetcher pageFetcher = new PageFetcher(config);
		RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
		RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);
		CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);

		// 要爬取的起始地址
		// controller.addSeed("http://www.ics.uci.edu/~lopes/");
		// controller.addSeed("http://www.ics.uci.edu/~welling/");
		controller.addSeed("https://bbs.csdn.net/topics/330051350");

		// 启动
		controller.start(MyCrawler.class, numberOfCrawlers);
	}
}
