package com.wolfword.crawler.client.deal;

import java.io.File;
import java.util.HashMap;
import java.util.Map;

import org.json.JSONObject;

import com.wolfword.common.exception.CrawlException;
import com.wolfword.common.util.object.StringUtil;
import com.wolfword.crawler.client.config.CrawlerConfig;

import edu.uci.ics.crawler4j.crawler.CrawlConfig;
import edu.uci.ics.crawler4j.crawler.CrawlController;
import edu.uci.ics.crawler4j.fetcher.PageFetcher;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig;
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer;

/**
 * Crawler core: continuously polls the server for a root URL to crawl,
 * crawls it with crawler4j, then reports statistics and uploads the
 * resulting HTML files back to the server. Sleeps 30 seconds between polls.
 *
 * @author renwei
 */
public class Crawler implements Runnable {
	/** Root directory where crawled HTML pages and crawler state are stored. */
	private String crawlStorageFolder = "./htmls";

	/** Unimplemented stub; crawling is driven entirely by {@link #run()}. */
	public void crawlHtmls() {

	}

	/**
	 * Main polling loop: fetches the next crawl task from the server,
	 * executes it, then sleeps 30 seconds and repeats forever.
	 * <p>
	 * Exits cleanly (restoring the interrupt flag) if the thread is
	 * interrupted while sleeping.
	 *
	 * @throws CrawlException if a crawl task fails; propagating it
	 *         terminates this thread, matching the original behavior.
	 */
	public void run() {
		while (true) {
			try {
				JSONObject json = ConnectServer.getRootUrl();
				if (json != null) {
					executeCrawlTask(json);
				} else {
					System.out.println("=============没有要爬取的域名");
					// System.exit(0);
				}

				System.out.println("=============休眠30秒");
				Thread.sleep(30 * 1000);
			} catch (InterruptedException e) {
				// Restore the interrupt flag and stop the loop so the
				// thread can be shut down cooperatively instead of
				// converting the interrupt into a CrawlException.
				Thread.currentThread().interrupt();
				return;
			} catch (CrawlException e) {
				e.printStackTrace();
				// Rethrow the original exception; the previous code wrapped
				// it as new CrawlException(e.getMessage()), which discarded
				// the stack trace and any cause.
				throw e;
			} catch (Exception e) {
				e.printStackTrace();
				// Preserve the underlying cause instead of discarding it.
				// (CrawlException's (String, Throwable) constructor is not
				// visible here, so attach the cause via initCause.)
				CrawlException wrapped = new CrawlException("爬取失败！");
				wrapped.initCause(e);
				throw wrapped;
			}

		}
	}

	/**
	 * Executes a single crawl task described by the server response:
	 * configures crawler4j, crawls the seed URL, then calls the server's
	 * statistics endpoint and uploads the stored files.
	 *
	 * @param json server response; expected to carry "id", "url" and
	 *             "contain_str" entries (assumed schema — confirm against
	 *             ConnectServer.getRootUrl())
	 * @throws CrawlException if the seed URL or contain_str is empty
	 * @throws Exception      if crawler4j setup/crawling or the upload fails
	 */
	private void executeCrawlTask(JSONObject json) throws Exception {
		System.out.println("client json:" + json.toString());
		System.out.println("client id:" + json.get("id"));
		System.out.println("client url:" + json.get("url"));
		// parseLong avoids the needless Long boxing of Long.valueOf
		long id = Long.parseLong(String.valueOf(json.get("id")));
		String url = String.valueOf(json.get("url"));
		String contain_str = String.valueOf(json.get("contain_str"));

		// The seed URL must be present before any crawler setup.
		if (StringUtil.isEmpty(url))
			throw new CrawlException("要爬取的域名为空！");

		// Substring filter is required to build the storage path below.
		if (StringUtil.isEmpty(contain_str))
			throw new CrawlException("contain_str不能为空!");

		// Make sure the HTML storage directory exists.
		File file = new File(crawlStorageFolder);
		if (!file.exists())
			file.mkdirs();

		int numberOfCrawlers = 5;

		CrawlConfig config = new CrawlConfig();
		// Directory where crawler4j keeps its intermediate crawl data.
		config.setCrawlStorageFolder(crawlStorageFolder + "/crawler");
		// Crawl depth: seed page A is depth 1; links found in A are
		// depth 2; links found in those are depth 3, and so on.
		config.setMaxDepthOfCrawling(2);
		// No limit on the number of pages fetched.
		config.setMaxPagesToFetch(-1);
		// If the crawler dies unexpectedly, resumable crawling would let
		// it recover; intentionally left disabled.
		// config.setResumableCrawling(true);

		// config.setShutdownOnEmptyQueue(false);
		// Delay in ms between two requests to the same host (default 200).
		config.setPolitenessDelay(200);

		// Connection timeout in milliseconds.
		config.setConnectionTimeout(10 * 1000);

		PageFetcher pageFetcher = new PageFetcher(config);

		RobotstxtConfig robotstxtConfig = new RobotstxtConfig();

		RobotstxtServer robotstxtServer = new RobotstxtServer(
				robotstxtConfig, pageFetcher);

		CrawlController controller = new CrawlController(config,
				pageFetcher, robotstxtServer);
		// Seed URL the crawl starts from.
		controller.addSeed(url);

		// Hand the per-task settings (substring filter, storage path,
		// file suffix, task id) to the crawler instances.
		String store_path = crawlStorageFolder + "/"
				+ contain_str.replace(".", "_");
		Map<String, Object> map = new HashMap<String, Object>();
		map.put("containStr", contain_str);
		map.put("storePath", store_path);
		map.put("suffix", ".html");
		map.put("id", id);
		controller.setCustomData(map);
		// Blocking start: returns only when the crawl is finished.
		controller.start(CrawlerConfig.class, numberOfCrawlers);

		controller.waitUntilFinish();
		System.out.println("----------------------------->爬取已经执行完毕！");

		// Ask the server to compute statistics for this URL.
		ConnectServer.statistics(url);
		System.out.println("----------------------------->统计完毕！");

		// Upload the stored HTML files to the server.
		ConnectServer.uploadFiles(store_path, contain_str);
		System.out.println("----------------------------->上传文件完毕！");
	}
}
