package com.abigdreamer.wordpress;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.abigdreamer.newspider.Article;
/**
 * Crawls meiwen.pw: recursively extracts article links from pages,
 * retries failed fetches, and builds {@link com.abigdreamer.newspider.Article}
 * instances from article URLs of the form {@code http://meiwen.pw/?p=<id>}.
 *
 * @author Darkness
 * @date 2013-2-6 下午08:33:34
 * @website http://www.abigdreamer.com 
 * @version V1.0   
 */
public class HtmlParse {

	Log log = LogFactory.getLog(HtmlParse.class);

	// Matches absolute http/https/ftp URLs. NOTE: the previous pattern
	// "[http|https|ftp]{3,4}://.*" was a CHARACTER CLASS, not alternation:
	// it accepted any 3-4 chars drawn from {h,t,p,f,s,|} before "://"
	// (e.g. "ppp://x") and rejected every "https" URL (5 chars). An
	// alternation group is what was intended.
	Pattern pattern = Pattern.compile("(https?|ftp)://.*");

	// Article URLs look like http://meiwen.pw/?p=<id>. Compiled once and
	// shared by isArticle() and getArticleId(); the dot in "meiwen.pw" is
	// escaped so it no longer matches an arbitrary character.
	private static final Pattern ARTICLE_PATTERN = Pattern.compile("http://meiwen\\.pw/\\?p=(\\d+)");

	// key: url already crawled, value: the link text that described it
	private Map<String, String> urlInfos = new HashMap<String, String>();
	// urls whose fetch failed; retried once by reverseCatch()
	private List<String> extractFailUrls = new ArrayList<String>();

	/**
	 * Single connection entry point shared by all fetch methods.
	 * 
	 * @param url     absolute url to fetch
	 * @param timeout connect/read timeout in milliseconds
	 * @param isGet   true for HTTP GET, false for HTTP POST
	 * @return the parsed document, or {@code null} when the request failed
	 */
	private Document connect(String url, int timeout, boolean isGet) {
		Connection conn = Jsoup.connect(url).timeout(timeout)
				.userAgent("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)");
		try {
			return isGet ? conn.get() : conn.post();
		} catch (IOException e) {
			// Pass the throwable itself; the old code logged
			// e.getStackTrace(), i.e. an array whose toString() is useless.
			log.debug("fetch failed: " + url, e);
			return null;
		}
	}

	/**
	 * Recursively collects all hyperlinks reachable from the given page.
	 * Only links whose href contains "meiwen" are followed; already-visited
	 * urls (tracked in {@link #urlInfos}) are skipped, which also bounds the
	 * recursion. Failed fetches are queued in {@link #extractFailUrls}.
	 * 
	 * @param url     page to crawl (must be an absolute http/https/ftp url)
	 * @param timeout connect/read timeout in milliseconds
	 * @param isGet   true for GET, false for POST
	 * @return map of url -> link description for the links found on THIS page
	 *         (keyed by url so links sharing a description are not lost);
	 *         empty when the url is invalid or the fetch failed
	 */
	public Map<String, String> getUrlsInContent(String url, int timeout, boolean isGet) {
		Map<String, String> map = new HashMap<String, String>();
		Matcher matcher = pattern.matcher(url);
		if (matcher.matches()) {
			Document doc = connect(url, timeout, isGet);
			// Fetch failed: remember the url so reverseCatch() can retry it.
			if (doc == null) {
				extractFailUrls.add(url);
				urlInfos.remove(url);
				return new HashMap<String, String>();
			} else {
				extractFailUrls.remove(url);
			}
			// Every anchor that carries an href attribute.
			Elements links = doc.select("a[href]");
			for (Element link : links) {
				// Resolve the href against the page's base url.
				String href = link.attr("abs:href");
				// key is the url, value is its description.
				map.put(href, link.text());

				// Skip anything outside the meiwen site.
				if (href.indexOf("meiwen") < 0) {
					System.out.println("[忽略]" + href);
					continue;
				}
				// Already crawled: do not process twice (recursion guard).
				if (urlInfos.containsKey(href)) {
					System.out.println("[已处理]" + href);
					continue;
				}
				System.out.println("[正在处理]" + href);
				urlInfos.put(href, link.text());
				getUrlsInContent(href, timeout, isGet);
			}
		}
		return map;
	}

	/**
	 * Fetches a page and returns its visible body text.
	 * 
	 * @param url     page to fetch (must be an absolute http/https/ftp url)
	 * @param timeout connect/read timeout in milliseconds
	 * @param isGet   true for GET, false for POST
	 * @return the body text, or {@code null} when the url is invalid or the
	 *         fetch failed
	 */
	public String getContents(String url, int timeout, boolean isGet) {
		String htmlText = null;
		Matcher matcher = pattern.matcher(url);
		if (matcher.matches()) {
			Document doc = connect(url, timeout, isGet);
			// connect() returns null on failure; the old code dereferenced
			// it unconditionally and could throw NullPointerException here.
			if (doc != null) {
				htmlText = doc.body().text();
			}
		}
		return htmlText;
	}

	/**
	 * Fetches a page and wraps it into an {@link Article} (id derived from
	 * the url, title from the document, content = body html).
	 * 
	 * @param url     article url
	 * @param timeout connect/read timeout in milliseconds
	 * @param isGet   true for GET, false for POST
	 * @return the article, or {@code null} when the fetch failed; for an
	 *         invalid url an article with {@code null} content is returned
	 *         (preserved from the original behavior)
	 */
	public Article getArticle(String url, int timeout, boolean isGet) {

		Article article = new Article();
		article.setId(getArticleId(url));

		String htmlText = null;
		Matcher matcher = pattern.matcher(url);
		if (matcher.matches()) {
			Document doc = connect(url, timeout, isGet);
			if (doc == null) {
				System.out.println("url抓取失败：" + url);
				return null;
			} else {
				System.out.println("url抓取成功：" + url);
			}
			// Keep the raw body html so markup survives into the article.
			htmlText = doc.body().html();
			article.setTitle(doc.title());
		} else {
			System.out.println("非法的地址：" + url);
		}

		article.setUrl(url);
		article.setContent(htmlText);
		return article;
	}

	/**
	 * Defensive snapshot, so reverseCatch() can iterate the copy while
	 * getUrlsInContent() mutates the original list.
	 */
	private static List<String> copyList(List<String> source) {
		// ArrayList's copy constructor replaces the old manual loop.
		return new ArrayList<String>(source);
	}

	/**
	 * Crawl driver: crawls from the given url, retries every failed url once,
	 * then prints all collected article urls plus total/failure counts.
	 * 
	 * @param url crawl entry point
	 */
	public void reverseCatch(String url) {
		System.out.println("[开始处理]" + url);
		Map<String, String> maps = getUrlsInContent(url, 3 * 1000, true);

		System.out.println("=====处理失败的链接，共" + extractFailUrls.size() + "个==========================================");
		System.out.println("[开始处理]");
		// Iterate over a snapshot: getUrlsInContent() adds/removes entries
		// in extractFailUrls while we retry.
		for (String failUrl : copyList(extractFailUrls)) {
			if (urlInfos.containsKey(failUrl)) {
				System.out.println("[忽略：已处理]" + failUrl);
				continue;
			}
			getUrlsInContent(failUrl, 3 * 1000, true);
		}

		// Keep only urls that look like article pages.
		Map<String, String> articleUrls = new HashMap<String, String>();
		for (String _url : urlInfos.keySet()) {
			if (isArticle(_url)) {
				articleUrls.put(_url, maps.get(_url));
			}
		}

		for (String _url : articleUrls.keySet()) {
			// key = url, value = its description (may be null for urls found
			// below the entry page, since `maps` holds top-level links only).
			System.out.println(_url + "@" + articleUrls.get(_url));
		}
		System.out.println("total：" + articleUrls.size());
		System.out.println("fail：" + extractFailUrls.size());
	}

	/**
	 * @return true when the url matches the meiwen.pw article pattern,
	 *         e.g. http://meiwen.pw/?p=465
	 */
	private boolean isArticle(String url) {
		return ARTICLE_PATTERN.matcher(url).find();
	}

	/**
	 * Derives a stable article id from an article url.
	 * 
	 * @param url article url, e.g. http://meiwen.pw/?p=465
	 * @return "meiwen-&lt;id&gt;" (e.g. "meiwen-465"), or "" when the url is
	 *         not an article url
	 */
	public String getArticleId(String url) {
		Matcher matcher = ARTICLE_PATTERN.matcher(url);
		if (matcher.find()) {
			return "meiwen-" + matcher.group(1);
		}
		return "";
	}
}
