package cn.edu.hfut.dmic.webcollector.dmhy.crawler;

import java.io.IOException;
import java.io.InputStream;

import javax.xml.parsers.ParserConfigurationException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xml.sax.SAXException;

import cn.edu.hfut.dmic.webcollector.crawler.DeepCrawler;
import cn.edu.hfut.dmic.webcollector.model.Links;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.souplang.Context;
import cn.edu.hfut.dmic.webcollector.souplang.SoupLang;
import cn.edu.hfut.dmic.webcollector.util.RegexRule;

public class DmhyCrawler extends DeepCrawler {

	private static final Logger LOGGER = LoggerFactory
			.getLogger(DmhyCrawler.class);

	/** Seed URL / site root that the crawl starts from. */
	private static final String DMHY_DOMAIN = "http://share.dmhy.org/";
	/** Filter deciding which discovered links are followed recursively. */
	private final RegexRule regexRule;
	/** Extractor driven by the XML rule file, applied to each fetched page. */
	private final SoupLang soupLang;

	/**
	 * Creates a crawler seeded at the DMHY share site that follows only
	 * topic detail pages.
	 *
	 * @param crawlPath directory used by {@code DeepCrawler} to persist crawl state
	 * @throws ParserConfigurationException if the SoupLang XML parser cannot be configured
	 * @throws SAXException if the rule file is not well-formed XML
	 * @throws IOException if the rule file cannot be found on the classpath or read
	 */
	public DmhyCrawler(String crawlPath) throws ParserConfigurationException,
			SAXException, IOException {
		super(crawlPath);
		regexRule = new RegexRule();
		addSeed(DMHY_DOMAIN);
		// Only topic detail pages are followed during the recursive crawl.
		regexRule.addRule("http://share.dmhy.org/topics/view/.+html$");
		// regexRule.addRule("-.*(jpg|png|gif|#|\\?).*");
		// getSystemResourceAsStream returns null when the resource is missing;
		// without this check the SoupLang constructor would fail with an opaque
		// NullPointerException. Fail fast with a descriptive IOException instead
		// (already part of this constructor's declared throws).
		InputStream ruleStream = ClassLoader
				.getSystemResourceAsStream("example/DmhyRule.xml");
		if (ruleStream == null) {
			throw new IOException(
					"SoupLang rule file not found on classpath: example/DmhyRule.xml");
		}
		soupLang = new SoupLang(ruleStream);
	}

	/**
	 * Visits one fetched page: logs the extracted "content" value (a magnet
	 * link, per the rule file) when a "title" element was matched, and returns
	 * the links on this page that pass {@link #regexRule}.
	 *
	 * @param page the fetched page whose document has already been parsed
	 * @return links found in the page's document that match the regex rule
	 */
	@Override
	public Links visitAndGetNextLinks(Page page) {
		/*
		 * soupLang.extract returns a Context holding every SoupLang element
		 * that carries a "name" attribute; values are read back via
		 * Context.get() or Context.getString().
		 */
		Context context = soupLang.extract(page.getDoc());
		if (null != context.get("title")) {
			String magnet = context.getString("content");
			LOGGER.info(magnet);
		}
		/* Collect matching links from the document for recursive crawling. */
		Links nextLinks = new Links();
		nextLinks.addAllFromDocument(page.getDoc(), regexRule);
		return nextLinks;
	}

}
