package _caolihua;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.kdtech.crawler.CrawlHTML;
import com.kdtech.entity.crawler.UrlMeta;

public class BaiDu_AnalyserImpl implements NewsAnalyser {

	/** Path to the PhantomJS executable used to render JavaScript-driven pages. */
	private static final String PHANTOMJS_BIN = "F:\\phantomjs\\bin\\phantomjs";

	/** PhantomJS script that loads a URL and prints the rendered HTML to stdout. */
	private static final String PHANTOMJS_SCRIPT = "F:\\phantomjs\\bin\\code.js";

	/**
	 * Marker for pages whose article body is filled in asynchronously: the
	 * comment-count placeholder only carries content after JS rendering.
	 * Compiled once instead of on every {@link #getNewsDocument(String)} call.
	 */
	private static final Pattern ASYNC_PAGE_MARKER =
			Pattern.compile("<em id=\"cmtnum\">.*?</em>");

	/** Matches every whitespace character except the plain space (tabs, newlines, ...). */
	private static final String NON_SPACE_WHITESPACE = "[\\s&&[^ ]]";

	// Detail-page URL prefixes harvested from the index page.
	// Static, so it is shared across all instances and accumulates over calls.
	private static Set<String> detailpageURLs = new HashSet<String>();

	/**
	 * Fetches {@code url} and converts it to a {@link NewsDocument}.
	 * <p>
	 * If the page looks asynchronous (see {@link #ASYNC_PAGE_MARKER}) it is
	 * re-rendered through PhantomJS; otherwise the plain HTML is handed to
	 * {@link SyncAnalyzer}.
	 *
	 * @param url the news page to analyse
	 * @return the parsed document; for asynchronous pages this is currently an
	 *         empty {@code NewsDocument}, because the async branch only prints
	 *         the rendered HTML — NOTE(review): looks unfinished, confirm
	 *         intended behavior
	 */
	public NewsDocument getNewsDocument(String url) {
		NewsDocument doc = new NewsDocument();
		UrlMeta meta = CrawlHTML.responseToURL(url);

		// Normalise the synchronously fetched HTML: strip non-space whitespace
		// and lower-case it so the regex markers match reliably.
		String syncHtml = meta.getHtml().replaceAll(NON_SPACE_WHITESPACE, "").toLowerCase();

		/*
		 * Planned design (not implemented yet): load per-site regex rules from
		 * the database for this URL, split on commas, and if any rule matches
		 * the HTML run the asynchronous analysis, otherwise the synchronous one.
		 */
		if (ASYNC_PAGE_MARKER.matcher(syncHtml).find()) {
			// Asynchronous page: render it with PhantomJS and capture stdout.
			String asyncHtml = renderWithPhantomJs(url);
			System.out.println("-------------------------------------------------异步解析结果----------------------------------------------");
			System.out.println(asyncHtml);
		} else {
			// Synchronous page: the fetched HTML already contains the article.
			doc = SyncAnalyzer.analyse(syncHtml, url);
		}
		return doc;
	}

	/**
	 * Runs PhantomJS against {@code url} and returns everything it wrote to
	 * stdout as a single string (line separators dropped, as before).
	 *
	 * @param url page to render
	 * @return rendered HTML, or whatever was read before an error occurred
	 */
	private static String renderWithPhantomJs(String url) {
		StringBuilder html = new StringBuilder();
		try {
			// ProcessBuilder takes an argument list, so the URL is passed as a
			// single argument instead of being whitespace-tokenised the way
			// Runtime.exec(String) did.
			Process process = new ProcessBuilder(PHANTOMJS_BIN, PHANTOMJS_SCRIPT, url).start();
			// try-with-resources closes the reader (and the underlying stream)
			// even if readLine() throws — the old code leaked it on error paths.
			try (BufferedReader br = new BufferedReader(
					new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
				String line;
				while ((line = br.readLine()) != null) {
					html.append(line);
				}
			}
			process.waitFor();
		} catch (IOException e) {
			e.printStackTrace();
		} catch (InterruptedException e) {
			// Restore the interrupt flag so callers can still observe it.
			Thread.currentThread().interrupt();
			e.printStackTrace();
		}
		return html.toString();
	}

	/**
	 * Manual smoke test: collects the detail-page URL prefixes from the Sohu
	 * news index and prints them.
	 */
	public static void main(String[] args) {
		BaiDu_AnalyserImpl analyser = new BaiDu_AnalyserImpl();

		System.out.println("--------------新闻详情页标准格式");
		analyser.getLevel1URLs("http://news.sohu.com/");
		for (String s : detailpageURLs) {
			System.out.println(s);
		}
	}

	/**
	 * Extracts the first-level (channel) link targets from the navigation block
	 * of the index page at {@code indexURL} and stores their detail-page form in
	 * the shared set.
	 *
	 * @param indexURL the news portal index page
	 * @return the shared set of detail-page URL prefixes collected so far
	 */
	public Set<String> getLevel1URLs(String indexURL) {
		URLFormatImpl urlFormatImp = new URLFormatImpl();
		String indexPageHtml = CrawlHTML.GetHtml(indexURL)
				.replaceAll(NON_SPACE_WHITESPACE, "").toLowerCase();

		// TODO: move these site-specific patterns into database configuration.
		Matcher navBlock = Pattern.compile("<div class=\"left\"><ul>.*?</ul>").matcher(indexPageHtml);
		if (navBlock.find()) {
			// Second pass: pull the href target out of every <li> in the block.
			Matcher link = Pattern.compile("<li.*?>(.*?)href=\"(.*?)\"(.*?)</li>").matcher(navBlock.group(0));
			while (link.find()) {
				detailpageURLs.add(urlFormatImp.getDetailFormat(link.group(2)));
			}
		}

		// "More" drop-down menus use href="#" as a placeholder; drop it.
		detailpageURLs.remove("#");
		return detailpageURLs;
	}

	/**
	 * Tells whether {@code url} is a news detail page, i.e. whether it contains
	 * one of the collected channel prefixes followed by a path ending in ".htm".
	 *
	 * @param url candidate URL
	 * @return {@code true} if the URL matches a known detail-page pattern
	 */
	public boolean isDetailPage(String url) {
		for (String prefix : detailpageURLs) {
			// Pattern.quote: the prefix is a literal URL, not a regex — its dots
			// must not match arbitrary characters. "\\.htm" fixes the previous
			// ".htm", whose leading dot matched any character (e.g. "xhtm").
			if (Pattern.compile(Pattern.quote(prefix) + ".*?\\.htm").matcher(url).find()) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Collects every href target found on the page at {@code url}.
	 *
	 * @param url page to scan
	 * @return the distinct set of raw href values (normalised to lower case)
	 */
	public Set<String> getAllURLs(String url) {
		Set<String> urls = new HashSet<String>();
		String html = CrawlHTML.GetHtml(url).replaceAll(NON_SPACE_WHITESPACE, "").toLowerCase();
		Matcher m = Pattern.compile("href=\"(.*?)\"").matcher(html);
		while (m.find()) {
			urls.add(m.group(1));
		}
		return urls;
	}

	/**
	 * Filters a set of URLs down to those recognised as news detail pages.
	 *
	 * @param allUrls candidate URLs
	 * @return the subset for which {@link #isDetailPage(String)} is true
	 */
	public Set<String> getDetailURLS(Set<String> allUrls) {
		Set<String> detailURLs = new HashSet<String>();
		for (String s : allUrls) {
			if (isDetailPage(s)) {
				detailURLs.add(s);
			}
		}
		return detailURLs;
	}
}
