package org.wangbao.service;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.UUID;

import org.cyberneko.html.parsers.DOMParser;
import org.dom4j.Document;
import org.dom4j.Element;
import org.dom4j.Node;
import org.dom4j.XPath;
import org.dom4j.io.DOMReader;
import org.dom4j.xpath.DefaultXPath;
import org.w3c.dom.NodeList;
import org.wangbao.model.News;
import org.xml.sax.SAXException;

public class HtmlParserUtil {

	/** Base URL of the news site; after construction holds the full query URL. */
	private String url;
	/** Accumulates results across calls to parserTitle()/parserContent(). */
	private List<News> resultList = new ArrayList<News>();

	DOMParser parser = new DOMParser();
	DOMReader dom = new DOMReader();
	/** dom4j view of the fetched search-result page; stays null if the fetch failed. */
	Document urldoc;

	/** Query-string template of the Baidu news search endpoint. */
	String param = "/ns?tn=news&from=news&cl=2&rn=10&ct=1&word=";
	private String keyword;

	// XPath to the SPAN holding each result title; the enclosing A carries the link.
	// The XPath engine here cannot address the wanted tag directly, so the parser
	// methods select this child and walk back up via getParent().
	private static final String NEWS_PATH = "//HTML/BODY/DIV/TABLE[@cellpadding='2']/TBODY/TR/TD/A/SPAN";

	/**
	 * Fetches and parses the search-result page for {@code url + param + keyword},
	 * storing the dom4j document in {@link #urldoc}. Errors are logged and leave
	 * {@code urldoc} null.
	 */
	private void analysizeUrl() {
		try {
			// Encode the keyword so non-ASCII (e.g. Chinese) terms survive the URL;
			// previously the raw keyword was concatenated and broke such requests.
			url = url + param + URLEncoder.encode(keyword, "UTF-8");

			parser.parse(url);
			urldoc = dom.read(parser.getDocument());
		} catch (SAXException e) {
			e.printStackTrace();
		} catch (IOException e) {
			// Also covers UnsupportedEncodingException from URLEncoder.encode.
			e.printStackTrace();
		}
	}

	/**
	 * Builds a parser bound to one search and immediately fetches the result page.
	 *
	 * @param url     base URL of the news site, e.g. "http://news.baidu.com"
	 * @param keyword search term; may contain non-ASCII characters
	 */
	public HtmlParserUtil(String url, String keyword) {
		this.url = url;
		this.keyword = keyword;
		analysizeUrl();
	}

	public static void main(String[] args) throws UnsupportedEncodingException {
		HtmlParserUtil hpu = new HtmlParserUtil("http://news.baidu.com", "成龙");
		hpu.parserContent();
	}

	/** Strips every HTML/XML tag from {@code markup}, leaving only its text content. */
	private static String stripTags(String markup) {
		// Single regex pass fixes two defects of the old substring/replaceAll loop:
		// tags whose attributes contain regex metacharacters no longer throw
		// PatternSyntaxException, and a ">" appearing before "<" (or a replacement
		// that removed nothing) can no longer spin the loop forever.
		return markup.replaceAll("<[^>]*>", "");
	}

	/**
	 * Extracts title/link pairs from the result page.
	 *
	 * @return the shared result list with one News (href + title) per hit
	 */
	public List<News> parserTitle() {
		XPath xpath = new DefaultXPath(NEWS_PATH);
		List<?> elements = xpath.selectNodes(urldoc);

		for (Iterator<?> iter = elements.iterator(); iter.hasNext();) {
			Element ele = (Element) iter.next();
			// The link lives on the enclosing <A>; the title text is inside the <SPAN>.
			String href = ele.getParent().attribute("href").getText();
			News news = new News();
			news.setHref(href);
			news.setTitle(stripTags(ele.asXML()));
			resultList.add(news);
		}
		return resultList;
	}

	/**
	 * Extracts title/link pairs, keyed by a freshly generated UUID that is also
	 * stored on each News via {@code setId}.
	 *
	 * @return map from generated id to the corresponding News
	 */
	public HashMap<String, News> parserMapTitle() {
		XPath xpath = new DefaultXPath(NEWS_PATH);
		List<?> elements = xpath.selectNodes(urldoc);

		HashMap<String, News> result = new HashMap<String, News>();

		for (Iterator<?> iter = elements.iterator(); iter.hasNext();) {
			Element ele = (Element) iter.next();
			String href = ele.getParent().attribute("href").getText();
			News news = new News();
			news.setHref(href);
			news.setTitle(stripTags(ele.asXML()));

			String id = UUID.randomUUID().toString();
			news.setId(id);
			result.put(id, news);
		}
		return result;
	}

	/**
	 * Extracts title, link and full article text for every hit, skipping hits
	 * whose article page could not be fetched or parsed.
	 *
	 * @return the shared result list of fully populated News objects
	 */
	public List<News> parserContent() {
		XPath xpath = new DefaultXPath(NEWS_PATH);
		List<?> elements = xpath.selectNodes(urldoc);

		for (Iterator<?> iter = elements.iterator(); iter.hasNext();) {
			Element ele = (Element) iter.next();
			String href = ele.getParent().attribute("href").getText();
			String content = getDetail(href);

			// Unreachable or unparsable article pages are silently skipped.
			if (content == null)
				continue;

			News news = new News();
			news.setContent(content);
			news.setHref(href);

			String title = stripTags(ele.asXML());
			news.setTitle(title);
			System.out.println("title is:" + title);
			resultList.add(news);
		}
		return resultList;
	}

	/**
	 * Downloads the article behind {@code href} and extracts its main body text.
	 *
	 * @param href absolute URL of an article page
	 * @return extracted text, or null when the page cannot be fetched or parsed
	 */
	public String getDetail(String href) {
		try {
			parser.parse(href);
			org.w3c.dom.Document doc = parser.getDocument();

			StringBuffer sb = new StringBuffer();
			getText(sb, doc.getElementsByTagName("BODY").item(0));

			return TextExtract.parse(sb.toString());
		} catch (SAXException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
		return null;
	}

	/**
	 * Recursively appends to {@code sb} all text nodes under {@code node} whose
	 * direct parent is a p/div/td element (the tags most likely to hold content).
	 */
	private void getText(StringBuffer sb, org.w3c.dom.Node node) {
		// Use the w3c constant explicitly: the file also imports org.dom4j.Node,
		// and relying on that dom4j constant here was only correct by coincidence
		// of the two libraries sharing the value 3 for TEXT_NODE.
		if (node.getNodeType() == org.w3c.dom.Node.TEXT_NODE) {
			String parent = node.getParentNode().getNodeName();
			if (parent.equalsIgnoreCase("p") || parent.equalsIgnoreCase("div")
					|| parent.equalsIgnoreCase("td"))
				sb.append(node.getNodeValue());
		}
		NodeList children = node.getChildNodes();
		if (children != null) {
			int len = children.getLength();
			for (int i = 0; i < len; i++) {
				getText(sb, children.item(i));
			}
		}
	}

	/**
	 * Depth-first search for the first element named {@code element}; its text is
	 * collected via {@link #getText(StringBuffer, org.w3c.dom.Node)}.
	 *
	 * @return true once a matching element was found and processed
	 */
	private boolean getText(StringBuffer sb, org.w3c.dom.Node node,
			String element) {
		// BUG FIX: the original tested TEXT_NODE here, but text nodes are always
		// named "#text", so the name comparison could never match an element name
		// and this method always returned false. Match ELEMENT_NODE instead.
		if (node.getNodeType() == org.w3c.dom.Node.ELEMENT_NODE
				&& element.equalsIgnoreCase(node.getNodeName())) {
			getText(sb, node);
			return true;
		}
		NodeList children = node.getChildNodes();
		if (children != null) {
			int len = children.getLength();
			for (int i = 0; i < len; i++) {
				if (getText(sb, children.item(i), element)) {
					return true;
				}
			}
		}
		return false;
	}
}

// 1. Encode Chinese parameters with URLEncoder.encode(src);
// 2. Set the GetMethod request encoding to utf-8: get_method.addRequestHeader("Content-type" ,
// "text/html; charset=utf-8");
// 3. Decode the response body as utf-8 or gb2312: String response=new
// String(get_method.getResponseBodyAsString().getBytes("gb2312"));
// Return JSON text according to the request
