package com.flute.icrawler.app.processor.store;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


import com.flute.icrawler.app.entity.CrawlResult;
import com.flute.icrawler.app.processor.AbstractProcessor;
import com.flute.icrawler.app.service.CrawlService;
import com.flute.icrawler.app.util.ParameterKey;
import com.flute.icrawler.config.CrawlConfig;
import com.flute.icrawler.framework.framework.CrawlUrl;
import com.flute.icrawler.framework.framework.IProcessor;

/**
 * Collects finished crawl results into the shared result pool.
 * 
 * @author jiangbo
 * 
 */
public class CrawlResultCollector extends AbstractProcessor {

	private static final Logger LOGGER = LoggerFactory
			.getLogger(CrawlResultCollector.class);

	/**
	 * Extracts the {@code CrawlResult} attached to the given URL and hands it
	 * to the global result pool. Does nothing when no result-handling threads
	 * are configured, so the pool is not filled without consumers.
	 * 
	 * @param url the crawled URL carrying the result under
	 *            {@code ParameterKey.RESULT_CRAWL}
	 */
	@Override
	public void process(CrawlUrl url) {
		CrawlService service = CrawlService.getInstance();

		// No result-processing threads configured: skip writing into the pool
		// for now, otherwise results would accumulate with nothing draining them.
		if (service.getResultDealThreadCount() == 0) {
			return;
		}

		LOGGER.debug("CrawlResultCollector:{}", url.getUrl());

		// NOTE(review): assumes RESULT_CRAWL is always present and holds a
		// CrawlResult — getResultParameter(...).getValue() would NPE/CCE
		// otherwise; confirm the upstream processor guarantees this.
		CrawlResult result = (CrawlResult) url
				.getResultParameter(ParameterKey.RESULT_CRAWL).getValue();
		service.getCrawlResultPool().addCrawlResult(result);

		LOGGER.info("CrawlResultCollector:addtoPool->{}", url.getUrl());
	}

}
