package com.kavian.blog.collect.site;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.htmlparser.NodeFilter;
import org.htmlparser.Parser;
import org.htmlparser.filters.AndFilter;
import org.htmlparser.filters.HasAttributeFilter;
import org.htmlparser.filters.NodeClassFilter;
import org.htmlparser.tags.Div;
import org.htmlparser.tags.LinkTag;
import org.htmlparser.tags.MetaTag;
import org.htmlparser.tags.Span;
import org.htmlparser.util.NodeList;

import com.kavian.blog.domain.WPPost;
import com.kavian.common.util.HttpClientUtil;

/**
 * CSDN爬虫
 * 
 * @author kavian
 * 
 */
public class CollectCsdn {

	/** Site root; relative category links from the side navigation are resolved against it. */
	private static final String URL = "http://blog.csdn.net";

	/** Charset used for both the HTTP fetch and the HTML parser. */
	private static final String CHARSET = "utf-8";

	/** Matches the "共N页" ("N pages in total") fragment of the pager text; compiled once. */
	private static final Pattern PAGE_COUNT_PATTERN = Pattern.compile("共(\\d+)页");

	/**
	 * Fetches an article detail page and extracts its title, body HTML and
	 * meta description into a {@link WPPost}.
	 *
	 * @param url absolute URL of the article detail page
	 * @return the populated post, or {@code null} if the page cannot be
	 *         fetched or lacks the expected title/content markup
	 */
	public WPPost findDetailed(String url) {
		try {
			WPPost wp = new WPPost();

			String content = HttpClientUtil.httpGet(url, CHARSET);

			// Title lives in <span class="link_title">.
			Parser parser = Parser.createParser(content, CHARSET);
			NodeFilter spanFilter = new AndFilter(
					new NodeClassFilter(Span.class),
					new HasAttributeFilter("class", "link_title"));
			NodeList spanList = parser.extractAllNodesThatMatch(spanFilter);
			if (spanList.size() == 0) {
				return null;
			}
			wp.setTitle(spanList.elementAt(0).toPlainTextString().trim());

			// Body lives in <div class="article_content">.
			parser.reset();
			NodeFilter divFilter = new AndFilter(
					new NodeClassFilter(Div.class),
					new HasAttributeFilter("class", "article_content"));
			NodeList divList = parser.extractAllNodesThatMatch(divFilter);
			if (divList.size() == 0) {
				// BUGFIX: the original dereferenced elementAt(0) unconditionally
				// and crashed on pages without an article body.
				return null;
			}
			Div div = (Div) divList.elementAt(0);
			// Escape single quotes so the HTML survives later embedding in a
			// single-quoted string; then append the attribution footer.
			String divHtml = div.getChildrenHTML().replaceAll("'", "\\\\'");
			divHtml += "<br /><p>转自：" + url + "</p>";
			wp.setContent(divHtml);

			// Optional summary from <meta name="description">.
			parser.reset();
			NodeFilter metaFilter = new AndFilter(
					new NodeClassFilter(MetaTag.class),
					new HasAttributeFilter("name", "description"));
			NodeList metaList = parser.extractAllNodesThatMatch(metaFilter);
			if (metaList.size() != 0) {
				MetaTag meta = (MetaTag) metaList.elementAt(0);
				wp.setDescription(meta.getAttribute("content"));
			}

			return wp;

		} catch (Exception e) {
			e.printStackTrace();
		}

		return null;
	}

	/**
	 * Collects the article URLs listed on one listing page: the first anchor
	 * inside each {@code <div class="blog_list">} entry.
	 *
	 * @param url listing page URL
	 * @return the article URLs found (possibly empty), or {@code null} if the
	 *         page cannot be fetched or parsed
	 */
	public List<String> searchDetailedUrl(String url) {
		List<String> list = new ArrayList<String>();
		try {
			String content = HttpClientUtil.httpGet(url, CHARSET);

			Parser parser = Parser.createParser(content, CHARSET);
			NodeFilter divFilter = new AndFilter(
					new NodeClassFilter(Div.class),
					new HasAttributeFilter("class", "blog_list"));
			NodeList divList = parser.extractAllNodesThatMatch(divFilter);

			for (int i = 0; i < divList.size(); i++) {
				String divHtml = divList.elementAt(i).toHtml();

				Parser divParser = new Parser(divHtml);
				NodeFilter linkFilter = new NodeClassFilter(LinkTag.class);
				NodeList linkList = divParser
						.extractAllNodesThatMatch(linkFilter);

				if (linkList.size() == 0) {
					// BUGFIX: an entry without a link used to NPE, and the
					// broad catch then discarded every URL collected so far.
					continue;
				}
				LinkTag link = (LinkTag) linkList.elementAt(0);
				list.add(link.getAttribute("href"));
			}

			return list;

		} catch (Exception e) {
			e.printStackTrace();
		}
		return null;
	}

	/**
	 * Builds the pagination URLs ("?page=1" .. "?page=N") for a listing page
	 * by reading the total page count out of the pager widget's "共N页" text.
	 *
	 * @param url listing page URL
	 * @return one URL per page, or {@code null} if the pager is missing or its
	 *         page-count text cannot be parsed
	 */
	public List<String> searchPageUrl(String url) {
		List<String> list = new ArrayList<String>();
		try {
			String content = HttpClientUtil.httpGet(url, CHARSET);

			Parser parser = Parser.createParser(content, CHARSET);
			NodeFilter pageNavFilter = new AndFilter(new NodeClassFilter(
					Div.class), new HasAttributeFilter("class", "page_nav"));
			NodeList pageNavList = parser
					.extractAllNodesThatMatch(pageNavFilter);

			if (pageNavList.size() == 0) {
				return null;
			}

			String pageHtml = pageNavList.elementAt(0).toHtml();
			Parser pageParser = new Parser(pageHtml);
			NodeFilter spanFilter = new NodeClassFilter(Span.class);
			NodeList spanList = pageParser.extractAllNodesThatMatch(spanFilter);

			if (spanList.size() == 0) {
				return null;
			}
			String pagerText = spanList.elementAt(0).toPlainTextString()
					.trim();

			Matcher m = PAGE_COUNT_PATTERN.matcher(pagerText);
			if (!m.find()) {
				// BUGFIX: the original ignored find()'s result, so group()
				// threw IllegalStateException on an unexpected pager format.
				return null;
			}
			// group(1) is the digits between 共 and 页 — no string surgery needed.
			int pageCount = Integer.parseInt(m.group(1));

			for (int i = 1; i <= pageCount; i++) {
				list.add(url + "?page=" + i);
			}

			return list;

		} catch (Exception e) {
			e.printStackTrace();
		}
		return null;
	}

	/**
	 * Scrapes the category links from the site's side navigation
	 * ({@code <div class="side_nav">}) on the CSDN front page.
	 *
	 * @return absolute category URLs, or {@code null} if the navigation block
	 *         is missing or the fetch fails
	 */
	public List<String> searchSort() {
		List<String> list = new ArrayList<String>();
		try {
			String content = HttpClientUtil.httpGet(URL, CHARSET);

			Parser parser = Parser.createParser(content, CHARSET);
			NodeFilter divFilter = new AndFilter(
					new NodeClassFilter(Div.class), new HasAttributeFilter(
							"class", "side_nav"));
			NodeList divList = parser.extractAllNodesThatMatch(divFilter);

			if (divList.size() == 0) {
				return null;
			}

			Parser liParser = new Parser(divList.elementAt(0).toHtml());
			NodeFilter linkFilter = new NodeClassFilter(LinkTag.class);
			NodeList linkList = liParser.extractAllNodesThatMatch(linkFilter);

			for (int i = 0; i < linkList.size(); i++) {
				LinkTag link = (LinkTag) linkList.elementAt(i);
				// Side-nav hrefs are site-relative; prefix the site root.
				list.add(URL + link.getAttribute("href"));
			}

			return list;
		} catch (Exception e) {
			e.printStackTrace();
		}

		return null;
	}

}