package com.flute.icrawler.app.processor.extractor;

import java.io.UnsupportedEncodingException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.flute.icrawler.app.entity.CrawlResult;
import com.flute.icrawler.app.processor.AbstractProcessor;
import com.flute.icrawler.app.service.CrawlService;
import com.flute.icrawler.app.util.IParser;
import com.flute.icrawler.app.util.ParameterKey;
import com.flute.icrawler.app.util.ParserException;
import com.flute.icrawler.app.util.TextParserImp;
import com.flute.icrawler.framework.framework.CrawlUrl;
import com.flute.icrawler.framework.framework.container.AdditionalUrlInformation;
import com.flute.icrawler.framework.framework.container.NumberStringValue;
import com.flute.icrawler.framework.framework.container.AdditionalUrlInformation.Additionals;
import com.flute.icrawler.framework.processor.result.AddUrlProcessorResult;
import com.flute.icrawler.framework.processor.result.SuccessProcessorResult;

/**
 * Extractor that treats the fetched page as plain text, extracts all links
 * from it, and registers matching links as new URLs to crawl.
 *
 * @author jiangbo
 */
public class ExtractorText extends AbstractProcessor {

	/** Regex passed to {@link CrawlService#match} to exclude URLs; empty means no filter. */
	public String filterReg = "";

	/** Regex passed to {@link CrawlService#match} that a URL must match to be added; empty means match all. */
	public String matchReg = "";

	/** Maximum crawl depth; 0 disables the depth limit. */
	public int deep = 0;

	private static final Logger LOGGER = LoggerFactory
			.getLogger(ExtractorText.class);

	/**
	 * Parses the crawl result attached to {@code crawlUrl} as text, extracts
	 * its links and registers them for crawling. If the URL has already
	 * reached the configured maximum depth, a {@link SuccessProcessorResult}
	 * is registered and no links are extracted.
	 *
	 * @param crawlUrl the URL currently being processed; must carry a
	 *                 {@link CrawlResult} under {@code ParameterKey.RESULT_CRAWL}
	 */
	@Override
	public void process(CrawlUrl crawlUrl) {
		// Depth limit: stop expanding once the configured maximum is reached
		// (deep == 0 means unlimited).
		if (deep != 0 && crawlUrl.getDeep() >= deep) {
			String errorMsg = "getDepth=" + crawlUrl.getDeep() + ";MaxDepth="
					+ deep;
			LOGGER.info("ExtractorText:{}", errorMsg);
			crawlUrl.registerProcessorResult(new SuccessProcessorResult());
			return;
		}

		CrawlResult result = (CrawlResult) crawlUrl.getResultParameter(
				ParameterKey.RESULT_CRAWL).getValue();

		try {
			IParser parser;
			if (result.getCharSet().equalsIgnoreCase(
					ParameterKey.CHARSET_TYPE_BINARY)) {
				// NOTE(review): decodes "binary" content with the platform
				// default charset — confirm this is intended.
				parser = new TextParserImp(new String(result.getContent()));
			} else {
				parser = new TextParserImp(new String(result.getContent(),
						result.getCharSet()));
			}

			String[] links = parser.getLinks();

			LOGGER.info("ExtractorText:find {} links {}", links.length,
					crawlUrl.getUrl());

			addNewUrls(crawlUrl, links);

		} catch (UnsupportedEncodingException e) {
			// Log with the cause instead of swallowing it via printStackTrace().
			LOGGER.error("ExtractorText: unsupported charset {} for {}",
					result.getCharSet(), crawlUrl.getUrl(), e);
		} catch (ParserException e) {
			LOGGER.error("ExtractorText: failed to parse {}",
					crawlUrl.getUrl(), e);
		}

	}

	/**
	 * Registers every discovered link that passes the filter/match regexes as
	 * a new {@link CrawlUrl}, carrying the parent's additional information
	 * with its depth incremented by one.
	 *
	 * @param crawlUrl the URL currently being processed
	 * @param urls     links discovered in the current page
	 */
	private void addNewUrls(CrawlUrl crawlUrl, String[] urls) {
		AddUrlProcessorResult parameter = new AddUrlProcessorResult();
		parameter.setName(ParameterKey.HTTP_LINKS);

		for (String url : urls) {
			// Guard clause replaces the former empty else { continue; } branch.
			if (!CrawlService.match(url, filterReg, matchReg)) {
				continue;
			}

			CrawlUrl newUrl = new CrawlUrl(crawlUrl.getJob(), url);
			AdditionalUrlInformation info = crawlUrl
					.getAdditionalUrlInformation().newInstance();

			// Take the parent's depth and increment it for the child URL.
			NumberStringValue currentDeep = (NumberStringValue) crawlUrl
					.getAdditionalUrlInformation().getValue(
							Additionals.Deep);
			info.updateValue(Additionals.Deep, new NumberStringValue(
					currentDeep.getNumber().intValue() + 1));

			newUrl.setAdditionalUrlInformation(info);
			parameter.addUrl(newUrl);

			// Fixed copy/paste: log tag previously said "ExtractorHTML".
			LOGGER.debug("ExtractorText:add url to pool->{}", url);
		}

		crawlUrl.registerProcessorResult(parameter);

	}

}
