package com.flute.icrawler.app.processor.extractor;

import java.io.UnsupportedEncodingException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.flute.icrawler.app.entity.CrawlResult;
import com.flute.icrawler.app.processor.AbstractProcessor;
import com.flute.icrawler.app.processor.extractor.special.ExtractorBelongingsAlgoFactory;
import com.flute.icrawler.app.processor.extractor.special.IExtractorBelongingsAlgo;
import com.flute.icrawler.app.service.CrawlService;
import com.flute.icrawler.app.util.ParameterKey;
import com.flute.icrawler.framework.framework.CrawlUrl;
import com.flute.icrawler.framework.framework.container.AdditionalUrlInformation;
import com.flute.icrawler.framework.framework.container.NumberStringValue;
import com.flute.icrawler.framework.framework.container.AdditionalUrlInformation.Additionals;
import com.flute.icrawler.framework.processor.result.AddUrlProcessorResult;

/**
 * 解析与链接相关的资源 如评论 图片等
 * 
 * @author jiangbo
 * 
 */
public class ExtractorBelongings extends AbstractProcessor {

	/** Regex used to filter page content before matching; presumably injected by configuration. */
	public String filterContentReg = "";

	/** Regex the page content must match for related links to be extracted; presumably injected by configuration. */
	public String matchContentReg = "";

	private static final Logger LOGGER = LoggerFactory
			.getLogger(ExtractorBelongings.class);

	/**
	 * Extracts resources related to the given link (e.g. comments, images).
	 * Looks up the extraction strategy for the URL, verifies the page content
	 * passes the configured filter/match regexes, and registers every related
	 * link the strategy discovers as a new crawl target.
	 *
	 * @param url the crawled link being processed; its crawl result must be
	 *            available under {@link ParameterKey#RESULT_CRAWL}
	 */
	@Override
	public void process(CrawlUrl url) {
		IExtractorBelongingsAlgo belongingsAlgo = ExtractorBelongingsAlgoFactory
				.getInstance().getAlgo(url.getUrl());

		// Null means no extraction strategy is registered for this link.
		if (null == belongingsAlgo) {
			LOGGER.debug("no belongingsAlgo url={}", url.getUrl());
			return;
		}
		CrawlResult result = (CrawlResult) url.getResultParameter(
				ParameterKey.RESULT_CRAWL).getValue();

		try {
			// Inspect the page content to decide whether related links
			// should be extracted at all.
			if (!CrawlService.match(result.getContentAsString(),
					filterContentReg, matchContentReg)) {
				LOGGER.info("unmatch url get belongings={}", url.getUrl());
				return;
			}
		} catch (UnsupportedEncodingException e) {
			// The content could not be decoded, so the match check is
			// indeterminate; skip extraction instead of proceeding blindly
			// (previously this fell through and extracted anyway).
			LOGGER.error("unsupported encoding, skip belongings url="
					+ url.getUrl(), e);
			return;
		}

		String[] belongings = belongingsAlgo.extractor(url);

		if (null == belongings) {
			LOGGER.warn("belongings is null url={}", url.getUrl());
			return;
		}

		LOGGER.info("find {} belongings {}", belongings.length, url.getUrl());
		addNewUrls(url, belongings);

		if (LOGGER.isDebugEnabled()) {
			for (String belonging : belongings) {
				LOGGER.debug(belonging);
			}
		}

	}

	/**
	 * Registers every discovered link as a new crawl target, propagating the
	 * crawl depth of the current link to each new one.
	 *
	 * @param crawlUrl
	 *            the link currently being processed
	 * @param urls
	 *            links discovered from the current link
	 */
	private void addNewUrls(CrawlUrl crawlUrl, String[] urls) {
		// Collect the non-empty discoveries into one result and hand it to the URL pool.
		AddUrlProcessorResult parameter = new AddUrlProcessorResult();
		parameter.setName(ParameterKey.HTTP_LINKS);
		for (String discovered : urls) {

			CrawlUrl newUrl = new CrawlUrl(crawlUrl.getJob(), discovered);
			AdditionalUrlInformation info = crawlUrl
					.getAdditionalUrlInformation().newInstance();

			// Carry the current crawl depth over to the new link.
			// NOTE(review): assumes Additionals.Deep is always present on the
			// parent link -- a missing value would NPE here; confirm upstream.
			NumberStringValue currentDeep = (NumberStringValue) crawlUrl
					.getAdditionalUrlInformation().getValue(Additionals.Deep);
			info.updateValue(Additionals.Deep, new NumberStringValue(
					currentDeep.getNumber().intValue()));

			newUrl.setAdditionalUrlInformation(info);
			parameter.addUrl(newUrl);

			LOGGER.debug("ExtractorHTML:add url to pool->{}", discovered);
		}

		crawlUrl.registerProcessorResult(parameter);

	}

}
