package com.ysq.excavator.content;

import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.ysq.excavator.util.CommonUtil;

/**
 * 正文提取，通过url挖掘正文
 * @author Administrator
 *
 */
/**
 * Extracts the main article text from a web page identified by its URL.
 * Also supports extracting the publish date and the original-publisher
 * ("first publish") name via caller-supplied CSS selector rules.
 *
 * <p>Heuristic: a node is considered "content" when its text contains at
 * least {@link #MIN_SIGN_COUNT} punctuation marks and at least
 * {@link #MIN_WORD_COUNT} characters.
 */
public class ExcavateContent {
	static Logger logger = CommonUtil.getLogger(ExcavateContent.class);

	/** Minimum number of punctuation marks a node's text must contain to be kept. */
	private static final int MIN_SIGN_COUNT = 2;

	/** Minimum number of characters a node's text must contain to be kept. */
	private static final int MIN_WORD_COUNT = 30;

	/** Tags stripped from the document before extraction. */
	private static final String[] REMOVE_TAGS = {
		"style"
	};

	/** Full-width (Chinese) and ASCII punctuation used to score text nodes. */
	private static final Pattern SIGN_PATTERN = Pattern.compile("[，。；！？.,;!?]");

	/**
	 * Matches dates such as "2014-02-19 12:34:56", "2014年2月19日" or "2014/2/19",
	 * with an optional HH:mm[:ss] time part. Cached once — compiling a Pattern
	 * per call is wasteful.
	 */
	private static final Pattern DATE_PATTERN = Pattern.compile(
			"2\\d{3}[-/年]?\\d{1,2}[-/月]?\\d{1,2}日?( ?\\d{1,2}:\\d{1,2}(:\\d{1,2})?)?");

	/** Leading/trailing whitespace, including the full-width space U+3000. */
	private static final Pattern TRIM_PATTERN = Pattern.compile("(^[　  ]*)|([　  ]*$)");

	/** Date layout produced by {@code CommonUtil.formatPublishDay}. */
	public final static String formatType = "yyyyMMddHHmmss";

	/** Read timeout in milliseconds when fetching the page. */
	private static final int FETCH_TIMEOUT_MILLIS = 4000;

	/****************************************************/

	/** Page URL, kept for diagnostic logging. */
	private final String url;

	/** Lazily computed article text; null/empty until first {@link #getUrlContent()}. */
	private String urlContent;

	/** Cleaned HTML of the fetched page (unwanted tags removed). */
	private final String htmlText;

	/** Cached publish date as a yyyyMMddHHmmss number; 0 = not yet extracted. */
	private long publishDay;

	/** Cached original-publisher name; null = not yet extracted. */
	private String firstPublish;

	/**
	 * Fetches the page at {@code url} and strips unwanted tags, preparing it
	 * for subsequent content/date/publisher extraction.
	 *
	 * @param url the page to excavate
	 * @throws Exception if the page cannot be fetched or parsed
	 */
	public ExcavateContent(String url) throws Exception {
		this.url = url;

		Document htmlDoc = getHtmlDocument(url);

		// Drop tags (e.g. <style>) that would pollute the extracted text.
		Element htmlClear = clearHtmlString(htmlDoc);
		this.htmlText = htmlClear.html();
	}

	/** ======================================== Public helpers ======================================== **/

	/**
	 * Fetches and parses the page at {@code url}.
	 *
	 * @param url the page URL
	 * @return the parsed document
	 * @throws Exception on network or parse failure
	 */
	public Document getHtmlDocument(String url) throws Exception {
		URL u = new URL(url);
		return Jsoup.parse(u, FETCH_TIMEOUT_MILLIS);
	}

	/**
	 * Removes unwanted tags (see {@link #REMOVE_TAGS}) from the element tree
	 * in place.
	 *
	 * @param bodyEle the element tree to clean (mutated)
	 * @return the same element, for chaining
	 */
	public Element clearHtmlString(Element bodyEle) {
		for (String remove : REMOVE_TAGS) {
			bodyEle.select(remove).remove();
		}
		return bodyEle;
	}

	/**
	 * Re-parses the cleaned HTML and returns a fresh {@code <body>} element.
	 * A fresh copy is returned each call so that destructive traversal
	 * ({@link #travelChildrenNode}) does not corrupt the cached HTML.
	 */
	public Element getBodyElement() {
		Document htmlClearDoc = Jsoup.parse(htmlText);
		return htmlClearDoc.body();
	}

	/** ======================================== Content extraction ======================================== **/

	/**
	 * Counts the punctuation marks (see {@link #SIGN_PATTERN}) in {@code text}.
	 *
	 * @param text the text to scan; null/empty yields 0
	 * @return the number of punctuation characters found
	 */
	public int getSignCount(String text) {
		if (text == null || text.isEmpty()) {
			return 0;
		}
		// Each sign is a single character, so counting matches equals the
		// original length-difference trick, without building a new String.
		int count = 0;
		Matcher m = SIGN_PATTERN.matcher(text);
		while (m.find()) {
			count++;
		}
		return count;
	}

	/**
	 * Returns the element's text with leading/trailing whitespace removed,
	 * including full-width spaces that {@code String.trim()} does not cover.
	 *
	 * @param nodeEle the element whose text to normalize
	 * @return the trimmed text
	 */
	public String getElementText(Element nodeEle) {
		String text = nodeEle.text().trim();
		return TRIM_PATTERN.matcher(text).replaceAll("");
	}

	/**
	 * Checks whether a node's siblings still look like content.
	 * Used as an early-exit optimization during traversal: once the
	 * remaining siblings fall below the content thresholds, we stop.
	 *
	 * @param child the node whose siblings to inspect
	 * @return true if the combined sibling text passes the content thresholds
	 */
	public boolean isBrotherLicit(Element child) {
		Elements brothers = child.siblingElements();
		String brothersText = brothers.text().trim();

		int brotherSign = getSignCount(brothersText);

		return brotherSign >= MIN_SIGN_COUNT && brothersText.length() >= MIN_WORD_COUNT;
	}

	/**
	 * Recursively walks {@code nodeEle}'s children, collecting the text of
	 * every node that passes the content thresholds.
	 *
	 * <p>NOTE: destructive — child nodes are removed from the tree as they are
	 * consumed, so the caller must pass a disposable copy (see
	 * {@link #getBodyElement()}).
	 *
	 * @param nodeEle the subtree root
	 * @return the concatenated content text (possibly empty, never null)
	 */
	public String travelChildrenNode(Element nodeEle) {
		Elements children = nodeEle.children();

		// Snapshot this node's full text before children are removed below.
		String wholeText = getElementText(nodeEle);

		String result = "";

		if (children.size() <= 0) {
			// Leaf node: return its own text if it qualifies as content.
			int wholSign = getSignCount(wholeText);
			if (wholSign >= MIN_SIGN_COUNT && wholeText.length() >= MIN_WORD_COUNT) {
				result = wholeText;
			}
			return result;
		}

		for (int i = 0; i < children.size(); i++) {
			Element child = children.get(i);

			// Only descend into children that themselves look like content.
			String childStr = getElementText(child);

			int markCount = getSignCount(childStr);

			if (markCount >= MIN_SIGN_COUNT && childStr.length() >= MIN_WORD_COUNT) {
				String nextStr = travelChildrenNode(child);
				result = result + nextStr;
			}
			// Stop early once the remaining siblings no longer look like content.
			if (!isBrotherLicit(child)) {
				break;
			}
		}
		// Remove children so getElementText below sees only this node's own text.
		children.remove();
		String simpleText = getElementText(nodeEle);

		int markCount = getSignCount(simpleText);

		if (markCount >= MIN_SIGN_COUNT && simpleText.length() >= MIN_WORD_COUNT) {
			result = result + wholeText;
		}
		return result;
	}

	/**
	 * Extracts (and caches) the page's article text.
	 *
	 * @return the article text, or null if extraction failed before any text
	 *         was produced
	 */
	public String getUrlContent() {
		if (urlContent != null && !"".equals(urlContent)) {
			return urlContent;
		}

		Element bodyClear = getBodyElement();

		try {
			urlContent = travelChildrenNode(bodyClear);
		} catch (Exception e) {
			// Best-effort extraction: log and return whatever we have (may be null).
			logger.error("content extraction failed : " + this.url, e);
		}
		return urlContent;
	}

	/** ======================================== publishDay extraction ======================================== **/

	/**
	 * Selects the element(s) matching {@code ruleName} (a CSS selector) and
	 * returns their combined text, expected to contain the publish date.
	 *
	 * @param ruleName CSS selector for the publish-date element; may be null
	 * @return the selected text, or null if {@code ruleName} is null
	 */
	private String getPublishDayText(String ruleName) {
		Element bodyClear = Jsoup.parse(htmlText);

		String result = null;

		if (ruleName != null) {
			Elements selectEles = bodyClear.select(ruleName);

			result = selectEles.text();
		}
		return result;
	}

	/**
	 * Finds the first date-like substring in {@code publishText}.
	 *
	 * @param publishText the text to search
	 * @return the matched date string, or null if none found
	 */
	public String getMatcheDate(String publishText) {
		Matcher match = DATE_PATTERN.matcher(publishText);

		if (match.find()) {
			return match.group();
		}
		return null;
	}

	/**
	 * Extracts (and caches) the publish date as a yyyyMMddHHmmss number.
	 *
	 * @param ruleName CSS selector locating the publish-date element (required)
	 * @return the publish date, or 0 if it could not be extracted
	 */
	public Long getPublishDay(String ruleName) {
		if (publishDay != 0) {
			return publishDay;
		}
		String publishText = getPublishDayText(ruleName);

		String matchDate = null;

		if (publishText != null && !"".equals(publishText)) {
			matchDate = getMatcheDate(publishText);
		} else {
			logger.info("publishDay error : " + this.url);
		}

		if (matchDate != null && !"".equals(matchDate)) {
			String formatDay = CommonUtil.formatPublishDay(matchDate);
			// Long.parseLong instead of the deprecated new Long(String).
			publishDay = Long.parseLong(formatDay);
		}
		return publishDay;
	}

	/** ======================================== first-publisher extraction ======================================== **/

	/**
	 * Extracts (and caches) the original-publisher (reprint source) name.
	 *
	 * @param ruleName CSS selector locating the publisher element; may be null
	 * @return the publisher name, trimmed by {@code CommonUtil.trimBlank}
	 */
	public String getFirstPublish(String ruleName) {
		if (firstPublish != null) {
			return firstPublish;
		}

		if (ruleName != null) {
			Element htmlClear = Jsoup.parse(htmlText);
			Elements selectEles = htmlClear.select(ruleName);

			firstPublish = selectEles.text();
		}
		// NOTE(review): when ruleName is null, trimBlank receives null —
		// presumed to be null-safe; verify against CommonUtil.
		firstPublish = CommonUtil.trimBlank(firstPublish);
		return firstPublish;
	}

	/**
	 * Ad-hoc smoke test against a live news page.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		String renMin = "http://politics.people.com.cn/n/2014/0219/c1024-24398453.html";

		try {
			ExcavateContent ec = new ExcavateContent(renMin);

			String contentText = ec.getUrlContent();

			System.out.println(contentText);

			String publishDayRule = "";

			long day = ec.getPublishDay(publishDayRule);

			System.out.println(day);

			String firstPublishRule = "";

			String printName = ec.getFirstPublish(firstPublishRule);

			System.out.println("转载网站名称：  " + printName);

			System.out.println("finish...");
		} catch (Exception e) {
			logger.error("smoke test failed : " + renMin, e);
		}
	}
}
