package com.flute.icrawler.app.processor.extractor;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.flute.icrawler.app.entity.CrawlResult;
import com.flute.icrawler.app.processor.AbstractProcessor;
import com.flute.icrawler.app.processor.extractor.algo.ExtractAlgoFactory;
import com.flute.icrawler.app.service.CrawlService;
import com.flute.icrawler.app.util.AbstractParserDocment;
import com.flute.icrawler.app.util.DynamicParserDocment;
import com.flute.icrawler.app.util.HtmlParserImp;
import com.flute.icrawler.app.util.IParser;
import com.flute.icrawler.app.util.ParameterKey;
import com.flute.icrawler.app.util.ParserException;
import com.flute.icrawler.app.util.StaticParserDocment;
import com.flute.icrawler.check.formxml.CrawlerCheckSerivce;
import com.flute.icrawler.framework.framework.CrawlUrl;
import com.flute.icrawler.framework.framework.container.AdditionalUrlInformation;
import com.flute.icrawler.framework.framework.container.AdditionalUrlInformation.Additionals;
import com.flute.icrawler.framework.framework.container.NumberStringValue;
import com.flute.icrawler.framework.processor.result.AddUrlProcessorResult;
import com.flute.icrawler.framework.processor.result.FailProcessorResult;
import com.flute.icrawler.framework.processor.result.SuccessProcessorResult;

/**
 * Extracts the links contained in an HTML-structured document and registers
 * them as new URLs to be crawled.
 *
 * <p>The public fields ({@code filterReg}, {@code matchReg}, {@code deep},
 * {@code needImgs}, {@code extractorType}) are configuration values —
 * presumably injected externally (e.g. from a crawl-job config); verify
 * against the framework's processor wiring.
 *
 * @author jiangbo
 */
public class ExtractorHTML extends AbstractProcessor {

	/** Regex used to filter out unwanted URLs; empty means no filtering. */
	public String filterReg = "";

	/** Regex a URL must match to be accepted; empty means accept all. */
	public String matchReg = "";

	/** Maximum crawl depth; 0 means unlimited. */
	public int deep = 0;

	// whether image URLs should also be extracted: 0 = no, 1 = yes
	public int needImgs = 0;

	// parse type: 0 = static document, 1 = dynamic document
	public int extractorType = 0;

	private static final Logger LOGGER = LoggerFactory
			.getLogger(ExtractorHTML.class);

	/**
	 * Processes a crawled URL: parses its downloaded HTML content, extracts
	 * links (and optionally images), and registers them as new URLs to crawl.
	 *
	 * @param crawlUrl
	 *            the URL whose downloaded content is to be parsed
	 */
	public void process(CrawlUrl crawlUrl) {

		LOGGER.debug("ExtractorHTML:{}", crawlUrl.getUrl());

		File file = new File(crawlUrl.getUrl());

		if (!file.isDirectory()) {
			LOGGER.debug("not a Directory {}", crawlUrl.getUrl());
		} else {
			// a local directory is handled by the directory-extraction algorithm
			ExtractAlgoFactory.getFileDirectoryExtractAlgo().extract(crawlUrl);
			return;
		}

		// stop descending once the configured maximum depth is reached
		if (deep != 0 && crawlUrl.getDeep() >= deep) {
			String errorMsg = "getDepth=" + crawlUrl.getDeep() + ";MaxDepth="
					+ deep;
			LOGGER.info("ExtractorHTML:{}", errorMsg);
			crawlUrl.registerProcessorResult(new SuccessProcessorResult());
			return;
		}

		CrawlResult result = (CrawlResult) crawlUrl.getResultParameter(
				ParameterKey.RESULT_CRAWL).getValue();

		// content type reported for the downloaded resource
		String strContentType = result.getContentType();

		// skip content that is not HTML
		if (!isExpectedType(strContentType, ParameterKey.CONTENTTYPE_HTML)) {
			String msg = "unExpectedType=" + strContentType + ";"
					+ crawlUrl.getUrl();
			CrawlService.getInstance().logFailResult(msg);
			LOGGER.warn("{}", msg);
			return;
		}

		try {
			IParser parser = getParser(result);

			// collect image URLs when configured to do so
			if (1 == needImgs) {
				String[] imgs = parser.getImgs();
				if (null == imgs) {
					// FIX: this message previously said "links is null"
					LOGGER.warn("ExtractorHTML:{}", "imgs is null");
					return;
				}

				LOGGER.info("ExtractorHTML:find {} imgs", imgs.length);
				addNewUrls(crawlUrl, imgs);
			}

			// register the newly discovered page links
			String[] links = parser.getLinks();
			if (null == links) {
				LOGGER.warn("ExtractorHTML:{}", "links is null");
				return;
			}
			LOGGER.info("ExtractorHTML:find {} links {}", links.length,
					crawlUrl.getUrl());

			addNewUrls(crawlUrl, links);

			if (LOGGER.isDebugEnabled()) {
				// dump the discovered links to XML for offline verification
				List<String> list = new ArrayList<String>(Arrays.asList(links));
				Map<String, List<String>> map = new HashMap<String, List<String>>();
				map.put(crawlUrl.getUrl(), list);
				CrawlerCheckSerivce.getInstance().writeXml(map);
			}

		} catch (ParserException e) {
			// log the full stack trace, not just the message, so the cause
			// of the parse failure is diagnosable
			LOGGER.error("ExtractorHTML: parse failed for " + crawlUrl.getUrl(), e);

			crawlUrl.registerProcessorResult(new FailProcessorResult());
		}

	}

	/**
	 * Filters the candidate URLs through the configured filter/match regexes
	 * and registers the survivors as new crawl URLs.
	 *
	 * @param crawlUrl
	 *            the URL currently being processed
	 * @param urls
	 *            candidate URLs discovered in its content
	 */
	private void addNewUrls(CrawlUrl crawlUrl, String[] urls) {
		List<String> list = new ArrayList<String>();
		for (String url : urls) {
			if (CrawlService.match(url, filterReg, matchReg)) {
				list.add(url);
			}
		}

		addNewUrls(crawlUrl, list);
	}

	/**
	 * Registers all newly discovered links with the URL pool.
	 *
	 * @param crawlUrl
	 *            the URL currently being processed
	 * @param urls
	 *            links discovered from the current URL
	 */
	private void addNewUrls(CrawlUrl crawlUrl, List<String> urls) {
		AddUrlProcessorResult parameter = new AddUrlProcessorResult();
		parameter.setName(ParameterKey.HTTP_LINKS);
		for (String url : urls) {

			CrawlUrl newUrl = new CrawlUrl(crawlUrl.getJob(), url);
			AdditionalUrlInformation info = crawlUrl
					.getAdditionalUrlInformation().newInstance();

			// take the current depth and increment it for the child link
			NumberStringValue currentDeep = (NumberStringValue) crawlUrl
					.getAdditionalUrlInformation().getValue(Additionals.Deep);
			// the child link inherits the parent's resource ID
			NumberStringValue currentID = (NumberStringValue) crawlUrl
					.getAdditionalUrlInformation().getValue(
							Additionals.ResourceID);

			info.updateValue(Additionals.Deep, new NumberStringValue(
					currentDeep.getNumber().intValue() + 1));
			info.updateValue(Additionals.ResourceID, new NumberStringValue(
					currentID.getNumber().intValue()));

			newUrl.setAdditionalUrlInformation(info);
			parameter.addUrl(newUrl);

			LOGGER.debug("ExtractorHTML:add url to pool->{}", url);
		}

		crawlUrl.registerProcessorResult(parameter);

	}

	/**
	 * Returns {@code true} when the content type matches any of the expected
	 * prefixes.
	 *
	 * @param contentType
	 *            the content type reported for the resource (may be null)
	 * @param expectedPrefixs
	 *            the acceptable content-type prefixes
	 */
	private static boolean isExpectedType(String contentType,
			String[] expectedPrefixs) {

		for (String expectedPrefix : expectedPrefixs) {
			if (isExpectedType(contentType, expectedPrefix)) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Loose two-way containment match: either string containing the other
	 * counts as a match (tolerates charset suffixes such as
	 * {@code "text/html; charset=utf-8"}).
	 */
	private static boolean isExpectedType(String contentType,
			String expectedPrefix) {

		return contentType != null
				&& (expectedPrefix.contains(contentType) || contentType
						.contains(expectedPrefix));
	}

	/**
	 * Builds a parser for the crawled result, choosing a static or dynamic
	 * document model according to {@link #extractorType}.
	 *
	 * @param result
	 *            the downloaded resource to parse
	 * @return a parser configured with the link-extraction XPath
	 */
	private IParser getParser(CrawlResult result) {

		String url = result.getUrl();
		String charset = result.getCharSet();

		AbstractParserDocment parserDocment;
		if (0 == extractorType) {
			// static document parsing
			parserDocment = new StaticParserDocment(url, charset, result
					.getContent());
		} else {
			// dynamic document parsing
			parserDocment = new DynamicParserDocment(url, charset, result
					.getContent());
		}

		return new HtmlParserImp(parserDocment, ParameterKey.XPATH_HREF);
	}

	/**
	 * Ad-hoc manual test: parses a local HTML file and prints the links found.
	 */
	public static void main(String[] args) {

		System.out.println(isExpectedType("text/html\r\n",
				ParameterKey.CONTENTTYPE_HTML));
		ExtractorHTML extractorHTML = new ExtractorHTML();

		CrawlResult result = new CrawlResult();
		byte[] bs = new byte[4 * 1024 * 1024];
		// try-with-resources ensures the stream is closed (it previously leaked)
		try (FileInputStream fileInputStream = new FileInputStream(
				"f://test.html")) {
			int read = fileInputStream.read(bs);
			if (read < 0) {
				read = 0;
			}

			// decode only the bytes actually read, with an explicit charset
			// (previously the whole zero-padded buffer was decoded with the
			// platform default charset)
			String content = new String(bs, 0, read, StandardCharsets.UTF_8);

			result.setCharSet("utf-8");
			result.setUrl("http://music.douban.com/");
			result.setContent(content.getBytes(StandardCharsets.UTF_8));

			IParser parser = extractorHTML.getParser(result);
			String[] links = parser.getLinks();

			for (String link : links) {
				System.out.println(link);
			}

		} catch (IOException e) {
			LOGGER.error("main: failed to read test file", e);
		} catch (ParserException e) {
			LOGGER.error("main: failed to parse test file", e);
		}

	}
}
