package com.kd.crawler.parser.url;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.kd.crawler.common.utils.StringUtils;
import com.kd.crawler.common.utils.UrlUtils;

/**
 * Extracts inner (crawlable) links from a page's HTML, filtering out
 * scripts, template placeholders, non-HTTP protocols and binary resources.
 *
 * @author Administrator
 */
public class UrlParser {

	/** Link content that marks a non-crawlable pseudo-link (case-insensitive substring match). */
	private static final String[] filters_1 = { "mailto:", "javascript:" };

	/** Prefixes indicating template placeholders or malformed hrefs. */
	private static final String[] filters_2 = { "+", "$", "=", "'", "\"", "/$",
			"{" };

	/** Marker pairs; an href containing both halves is treated as an unexpanded template expression. */
	private static final String[][] filters_3 = { { "${", "}" },
			{ "=${", "}" }, { "=\"+", "+\"" }, { "=\\'+", "+\\'" },
			{ "<#", "#>" }, { "'+", "+" }, { "<", "</" } };

	/** Non-HTTP download/IM protocols that are never crawled. */
	private static final String[] filters_4 = { "tencent://", "pps://",
			"thunder://", "ed2k://", "flashget://", "iku://" };

	/**
	 * File-name suffixes of binary/document resources to skip.
	 */
	private static final String[] filterUrl = { ".doc", ".pdf", ".wps", ".xls",
			".app", ".apk", ".exe", ".ppt", ".rar", ".zip", ".jpg", ".jpeg",
			".png", ".txt", ".rtf", ".dll", ".gif", ".cur", ".ap", ".jar",
			".chm",
			// FIX: was "asx" (missing dot), which rejected ANY URL merely ending in "asx"
			".asx",
			".flv", ".avi", ".bak", ".bat", ".bin", ".bmp", ".dat", ".db",
			".dbf", ".ico", ".log", ".mp3", ".mp4", ".mdb" };

	/** Compiled once (Pattern is immutable and thread-safe); matches whole &lt;a href=...&gt;...&lt;/a&gt; fragments. */
	private static final Pattern HREF_PATTERN = Pattern.compile("<a href=[^+{]*?</a>");

	/**
	 * Extracts all inner links from the {@code <a>} tags of a page.
	 *
	 * @param response  raw HTML of the page
	 * @param sourceUrl URL the page was fetched from; used as the base for
	 *                  relative links when the page has no {@code <base href>} tag
	 * @return cleaned absolute URLs; never {@code null}, filtered-out links are omitted
	 */
	public static List<String> getInnerLinks(String response, String sourceUrl) {
		Document doc = Jsoup.parse(response);
		// Per HTML semantics an explicit <base href> overrides the page's own URL.
		String baseHref = doc.select("base").attr("href");
		if (StringUtils.isBlank(baseHref)) {
			baseHref = sourceUrl;
		}

		// jsoup's select() never returns null, so no null guard is needed here.
		List<String> urls = new ArrayList<String>();
		for (Element element : doc.select("a")) {
			String url = completeURL(filterURL(element.attr("href")), baseHref);
			if (StringUtils.isBlank(url)) {
				continue; // rejected by one of the filters — do not emit "" entries
			}
			url = UrlUtils.formatUrl(url);
			if (!StringUtils.isBlank(url)) {
				urls.add(url);
			}
		}
		return urls;
	}

	/**
	 * Cleans a raw href taken from an anchor tag and rejects unwanted links
	 * (javascript/mailto pseudo-links, template placeholders, non-HTTP protocols,
	 * binary file suffixes).
	 *
	 * @param href raw href attribute value
	 * @return the normalized href, or {@code ""} when the link must be discarded
	 */
	public static String filterURL(String href) {
		if (StringUtils.isBlank(href) || href.startsWith("#")) {
			return "";
		}

		// Normalize: drop control whitespace, use forward slashes.
		href = href.trim()
				.replace("\r", "")
				.replace("\n", "")
				.replace("\t", "")
				.replace("\\", "/");

		// Lower-case once instead of per loop iteration; Locale.ROOT keeps the
		// comparison locale-independent (e.g. Turkish dotless-i).
		String lower = href.toLowerCase(Locale.ROOT);

		for (String bad : filters_1) {      // javascript:/mailto: anywhere in the link
			if (lower.contains(bad)) {
				return "";
			}
		}
		for (String prefix : filters_2) {   // template/malformed prefixes
			if (href.startsWith(prefix)) {
				return "";
			}
		}
		for (String[] pair : filters_3) {   // both halves of a template marker present
			if (href.contains(pair[0]) && href.contains(pair[1])) {
				return "";
			}
		}
		for (String proto : filters_4) {    // non-HTTP download protocols
			if (lower.startsWith(proto)) {
				return "";
			}
		}
		for (String suffix : filterUrl) {   // binary/document file suffixes
			if (lower.endsWith(suffix)) {
				return "";
			}
		}

		// Only http:// and https:// schemes are crawlable.
		if (href.contains("://") && !lower.startsWith("http")) {
			return "";
		}

		// Split/re-join on '.': String.split drops trailing empty parts, which
		// removes a dangling trailing dot (e.g. "http://hao123.com." -> "http://hao123.com").
		String[] parts = href.split("\\.");
		if (parts.length > 0) {
			if (parts[0].startsWith("http")) {
				parts[0] = parts[0].replace(" ", ""); // scheme part must not contain spaces
			}
			href = String.join(".", parts);
		}
		// Percent-encode any remaining spaces.
		return href.replace(" ", "%20");
	}

	/**
	 * Turns a (possibly relative) href into an absolute http(s) URL, dropping
	 * any fragment ("#...") part.
	 *
	 * @param href     href to complete; may already be absolute
	 * @param baseHref absolute base URL (page URL or {@code <base>} tag value)
	 * @return the absolute URL, or {@code ""} when the link cannot be completed
	 */
	public static String completeURL(String href, String baseHref) {
		if (StringUtils.isBlank(href)) {
			return "";
		}
		String lower = href.toLowerCase(Locale.ROOT);
		if (lower.startsWith("http://") || lower.startsWith("https://")) {
			// Already absolute: strip the fragment and require at least one dot
			// in it, rejecting junk like "http://hao123".
			href = stripFragment(href);
			return href.indexOf('.') != -1 ? href : "";
		}
		if (href.startsWith("#")) {
			return ""; // in-page anchor only
		}
		// FIX: the old check rejected every https:// base, dropping all
		// relative links found on https pages.
		if (StringUtils.isBlank(baseHref)
				|| !(baseHref.startsWith("http://") || baseHref.startsWith("https://"))) {
			return "";
		}
		// FIX: resolve per RFC 3986 instead of "baseHref + href.substring(1)",
		// which unconditionally dropped the first character of the href and
		// mangled every relative link not starting with '/'.
		try {
			return stripFragment(new URI(baseHref).resolve(href).toString());
		} catch (URISyntaxException | IllegalArgumentException e) {
			return ""; // unparsable link: treat like any other filtered href
		}
	}

	/** Removes the "#fragment" suffix from a URL, if present. */
	private static String stripFragment(String url) {
		int hash = url.indexOf('#');
		return hash == -1 ? url : url.substring(0, hash);
	}

	/**
	 * Regex-based fallback extractor: collects every {@code <a ...>...</a>}
	 * fragment from the raw response text and parses each with jsoup.
	 *
	 * @param response raw HTML
	 * @return all matched anchor elements; possibly empty, never {@code null}
	 */
	public static Elements getRegexHrefs(String response) {
		Elements anchors = new Elements();
		Matcher matcher = HREF_PATTERN.matcher(response);
		while (matcher.find()) {
			anchors.addAll(Jsoup.parse(matcher.group(0)).select("a"));
		}
		return anchors;
	}

}
