package com.gxljc.bear.crawler.commons;

import java.io.StringReader;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Logger;
import org.codehaus.jackson.map.ObjectMapper;

import com.gxljc.commons.CrawlerUtils;
import com.gxljc.commons.redis.dao.Basegxljcedis;
import com.gxljc.commons.util.sql.BaseDao;
import com.gxljc.commons.util.sql.BaseDaoFactory;
import com.gxljc.bear.crawler.base.DolphinCrawlerConsts;

/**
 * Spark crawler. Runs four steps: generate seeds, submit seeds, wait for the
 * crawl to finish, and export the crawl results.
 * 
 * @author yunbiao
 * @since 2015-4-20 15:08:01
 */
abstract public class SparkCrawler {

	public static Logger LOG = Logger.getLogger(SparkCrawler.class);
	protected final static String CRAWL_LIST_KEY = "commons:crawler:spark:url";
	protected final static long ONE_DAY = 24L * 60L * 60L * 1000L;
	protected final Configuration conf;
	protected final BaseDao baseDao;

	// ObjectMapper is thread-safe after configuration; share a single instance.
	private final static ObjectMapper jsonObjectMapper = new ObjectMapper();
	private long waitTimeout = 10L * 60L * 60L * 1000L; // wait timeout: 10 hours
	private String tid;

	/**
	 * @param conf configuration; must provide
	 *            {@code DolphinCrawlerConsts.PARAM_MYSQL_URL}
	 * @param tid crawler task id used for seed submission and status polling
	 */
	public SparkCrawler(Configuration conf, String tid) {
		super();
		this.tid = tid;
		this.conf = conf;
		// Initializes the shared Redis API handle as a side effect.
		Basegxljcedis.getgxljcRedisAPI(conf);
		baseDao = BaseDaoFactory.getDaoBaseInstance(conf
				.get(DolphinCrawlerConsts.PARAM_MYSQL_URL));
	}

	/**
	 * Runs the full crawl cycle: generate seeds, submit them, wait for the
	 * crawler to finish, then export the results. Does nothing when no seeds
	 * are produced.
	 */
	public void crawl() {
		// 1. generate seeds
		Seeds seeds = genSeeds();
		if (seeds == null || seeds.isEmpty()) {
			return;
		}
		LOG.info("============= start spark crawler =============");
		// subclass hook for preparation work before submission
		prepare(seeds);
		// 2. submit seeds
		int retry = 100;
		String patchId = submitSeeds2SparkWithRetry(seeds.getUrls(), retry);
		if (patchId == null) {
			// submission failed after all retries: log at error level
			LOG.error("submit spark crawler error, patchId is null!");
			return;
		}
		LOG.info("submit spark crawler done , urlSize: "
				+ seeds.getUrls().size() + ", pid: " + patchId);

		// 3. wait for the crawl to finish (or time out)
		waitFinish(patchId, waitTimeout);

		// 4. export results
		export(seeds);
		LOG.info("------------ finish spark crawler ------------");
	}

	public long getWaitTimeout() {
		return waitTimeout;
	}

	public void setWaitTimeout(long waitTimeout) {
		this.waitTimeout = waitTimeout;
	}

	/**
	 * Generates the crawler seeds.
	 * 
	 * @return the seeds to crawl; {@code null} or empty skips the crawl
	 */
	abstract protected Seeds genSeeds();

	/**
	 * Preparation hook invoked after seed generation and before submission.
	 * The default implementation does nothing.
	 * 
	 * @param seeds the seeds about to be submitted
	 */
	protected void prepare(Seeds seeds) {

	}

	/**
	 * Submits the seed URLs to the Spark crawler, retrying on failure with a
	 * one-second pause between attempts.
	 * 
	 * @param urls seed URLs to submit
	 * @param retry number of additional attempts after the first one
	 * @return the batch (patch) id, or {@code null} if every attempt failed or
	 *         the thread was interrupted while waiting between attempts
	 */
	protected String submitSeeds2SparkWithRetry(List<String> urls, int retry) {
		String patchId = null;
		while (patchId == null && retry >= 0) {
			patchId = CrawlerUtils.addSeedsForSpark(tid, urls);
			if (patchId == null) {
				LOG.info("add spark crawler error, patchId is null, retry "
						+ retry);
				try {
					Thread.sleep(1000L);
				} catch (InterruptedException e) {
					// restore the interrupt status and stop retrying
					Thread.currentThread().interrupt();
					return null;
				}
			}
			retry--;
		}
		return patchId;
	}

	/**
	 * Polls the crawler once per second until the batch finishes, the timeout
	 * elapses, or the calling thread is interrupted.
	 * 
	 * @param patchId batch id returned by seed submission
	 * @param timeout maximum wait in milliseconds; a negative value waits
	 *            forever
	 */
	protected void waitFinish(String patchId, long timeout) {
		int waitTimes = 0;
		long start = System.currentTimeMillis();
		while (!CrawlerUtils.isFinished(tid, patchId)) {
			waitTimes++;
			// log progress only every 10 polls to keep the log quiet
			if (waitTimes % 10 == 0) {
				LOG.info("waiting for sparkCrawler finish , {tid:" + tid
						+ ",pid:" + patchId + "}");
			}
			try {
				Thread.sleep(1000L);
			} catch (InterruptedException e) {
				// restore the interrupt status and stop waiting
				Thread.currentThread().interrupt();
				break;
			}
			long elapsed = System.currentTimeMillis() - start;
			if (timeout >= 0 && elapsed > timeout) {
				LOG.error(String.format(
						"waiting for sparkCrawler timeout, timeout=%d, elapsed=%d",
						timeout, elapsed));
				break;
			}
		}
		// NOTE: also logged when the wait ended by timeout or interrupt
		LOG.info("sparkCrawler has finished , {tid:" + tid + ",pid:" + patchId
				+ "}");
	}

	/**
	 * Exports the crawl results.
	 * 
	 * @param seeds the seeds that were crawled
	 */
	abstract protected void export(Seeds seeds);

	/**
	 * Parses a JSON object string into a map.
	 * 
	 * @param json JSON text representing an object
	 * @return the parsed map, or {@code null} when parsing fails
	 */
	@SuppressWarnings("unchecked")
	public static Map<String, Object> parseJson2Map(String json) {
		Map<String, Object> dataMap = null;
		try {
			dataMap = jsonObjectMapper.readValue(new StringReader(json),
					Map.class);
		} catch (Exception e) {
			LOG.error("parse json error!", e);
		}
		return dataMap;
	}
}
