package com.kd.crawler.parser;

import java.io.File;
import java.net.URL;
import java.util.List;

import org.apache.log4j.Logger;

import com.kd.crawler.common.utils.StringUtils;
import com.kd.crawler.common.utils.UrlUtils;
import com.kd.crawler.common.utils.XMLParser;
import com.kd.crawler.entity.CrawlerEntry;
import com.kd.crawler.entity.Document;

/**
 * Abstract base class used to parse an HTML page.
 * It holds no mutable state, so it is thread-safe.
 * @author wyj
 *
 */
public abstract class HtmlParser {

	private static final Logger logger = Logger.getLogger(HtmlParser.class);

	/**
	 * Extracts all inner links that point to detail pages from the HTML of a
	 * crawl result.
	 * @param cr the crawl result whose HTML is scanned
	 * @return the list of detail-page URLs found in the page
	 */
	public abstract List<String> getDetailedInnerLinks(CrawlerEntry cr);

	/**
	 * Parses the crawl result into the specified {@link Document} bean.
	 * @param cr the crawl result to parse
	 * @return the parsed document
	 */
	public abstract Document parse(CrawlerEntry cr);

	/**
	 * Checks whether the parser-config XML file at the given path exists on
	 * the local file system.
	 * @param parserPath absolute path of the parser config file
	 * @return true if the path is non-blank and the file exists
	 */
	public boolean exists(String parserPath) {
		if (StringUtils.isBlank(parserPath)) {
			return false;
		}
		return new File(parserPath).exists();
	}

	/**
	 * Resolves a parser name to the absolute path of its
	 * <code>&lt;parserName&gt;.xml</code> config file on the classpath.
	 * @param parserName parser name without the ".xml" extension
	 * @return the absolute path, or "" when the name is blank or the resource
	 *         cannot be found
	 */
	public String getParserPath(String parserName) {
		if (StringUtils.isBlank(parserName)) {
			return "";
		}
		// Use this instance's class loader rather than hard-coding the
		// concrete subclass HtmlParserImpl: the abstract parent must not
		// depend on one particular implementation, and subclasses loaded by a
		// different class loader would otherwise fail to see their resources.
		URL parserUrl = getClass().getClassLoader().getResource(parserName + ".xml");
		if (null == parserUrl) {
			return "";
		}
		// NOTE(review): URL.getPath() leaves %-escapes (e.g. spaces) encoded;
		// confirm config files never live in such directories.
		return parserUrl.getPath();
	}

	/**
	 * Judges whether the URL is a detail page, using the regex list configured
	 * in the parser file at the given path.
	 * @param url the URL to test
	 * @param parserPath path of the parser config file
	 * @return true when the URL matches the configured detail-page regexes
	 */
	public boolean match(String url, String parserPath) {
		return UrlUtils.isDetailed(url, getRegex(parserPath));
	}

	/**
	 * Reads the "isDetailPage.regex" entries from the parser config file and
	 * logs an error when none are configured.
	 * @param parserPath path of the parser config file
	 * @return the configured regex list, or null when the path is blank
	 *         (kept as null for backward compatibility — callers such as
	 *         UrlUtils.isDetailed may rely on it)
	 */
	public List<String> getRegex(String parserPath) {
		if (StringUtils.isBlank(parserPath)) {
			return null;
		}
		List<String> regex = XMLParser.getMoreText(parserPath, "isDetailPage.regex");
		logFor(regex, "isDetailPage.regex", parserPath);
		return regex;
	}

	/**
	 * Logs an error when the given xPath produced no entries in the config
	 * file (.xml), i.e. the tag is missing or empty.
	 * @param jsoups the values read for the xPath (may be null)
	 * @param xPath the selector key, e.g. "title.jsoup", "author.jsoup"
	 * @param parserPath path of the parser config file
	 */
	public void logFor(List<String> jsoups, String xPath, String parserPath) {
		if (null == jsoups || jsoups.isEmpty()) {
			logger.error("[" + parserPath + "]文件中没有配置[" + xPath + "]解析标签");
		}
	}

	/**
	 * Logs an error when a parsed String field is blank, meaning the current
	 * config did not extract the flagged value from the page.
	 * @param url the page URL being parsed
	 * @param info the extracted value to check
	 * @param flag the field name, e.g. "title", "author"
	 * @param parserPath path of the parser config file
	 * @param crawlerEntry the crawl entry being processed (currently unused;
	 *        kept for the commented-out job-status update)
	 */
	public void logFor(String url, String info, String flag, String parserPath, CrawlerEntry crawlerEntry) {
		if (StringUtils.isBlank(info)) {
			//jobDao.update(crawlerEntry.getJobID(), crawlerEntry.getEntryId());
			logMissingFlag(url, flag, parserPath);
		}
	}

	/**
	 * Logs an error when a parsed Long field is null or zero, meaning the
	 * current config did not extract the flagged value from the page.
	 * @param url the page URL being parsed
	 * @param info the extracted value to check
	 * @param flag the field name, e.g. "title", "author"
	 * @param parserPath path of the parser config file
	 * @param crawlerEntry the crawl entry being processed (currently unused;
	 *        kept for the commented-out job-status update)
	 */
	public void logFor(String url, Long info, String flag, String parserPath, CrawlerEntry crawlerEntry) {
		if (null == info || 0 == info) {
			//jobDao.update(crawlerEntry.getJobID(), crawlerEntry.getEntryId());
			logMissingFlag(url, flag, parserPath);
		}
	}

	// Single place that builds the "flag not parsed" error message; both
	// logFor(String/Long) overloads previously duplicated this line verbatim.
	private void logMissingFlag(String url, String flag, String parserPath) {
		logger.error("parser: " + url + "---->" + "根据现有的配置没有解析到[" + flag + "]，请在解析文件" + parserPath + "添加更多的[" + flag + ".jsoup]标签");
	}

	// No-op placeholder kept for backward compatibility; consider removing.
	public static void main(String[] args) {
	}

}
