package com.fang.www.wg.crawler;

import java.net.URLDecoder;

///**
// * 
// */
//package com.fang.www.wg.crawler;
//
//import java.io.BufferedInputStream;
//import java.io.BufferedReader;
//import java.io.IOException;
//import java.io.InputStream;
//import java.io.InputStreamReader;
//import java.net.URLDecoder;
//import java.util.Scanner;
//import java.util.function.Predicate;
//import java.util.regex.Matcher;
//import java.util.regex.Pattern;
//
//import org.apache.http.Header;
//import org.apache.http.HttpEntity;
//import org.apache.http.client.ClientProtocolException;
//import org.apache.http.client.HttpClient;
//import org.apache.http.client.methods.CloseableHttpResponse;
//import org.apache.http.client.methods.HttpGet;
//import org.apache.http.impl.client.CloseableHttpClient;
//import org.apache.http.impl.client.HttpClients;
//import org.apache.http.util.EntityUtils;
//import org.apache.log4j.Logger;
//
//import com.fang.www.threadpool.MyThread;
//import com.fang.www.threadpool.ThreadPool;
//import com.fang.www.wg.util.DisCompetitionColection;
//import com.fang.www.wg.util.RegexUtil;
//import com.fang.www.wg.util.UrlCollection;
//
///**
// * @author 王刚
// *
// */
public class PageCollects {
//	//浏览器类 （会提供几种验证，但是Apache Client api还不太熟悉）
////	Logger logger = Logger.getLogger(PageCollects.class);
//	private CloseableHttpClient httpclient ;
//	//url容器   
//	private UrlCollection urlCollection;
//	//正则工具类
//	private RegexUtil regexUtil;
//	//排重容器：未来会根据数据需求提供三种容器：1集合（存内存里，但爬不了多少数据，因为基于内存），2：mysql数据库（几百万轻飘飘但是越往后磁盘io限制，还有用行做数据块的限制（我可能会考虑下用hbase））3：布隆过滤（基于很多hash确定的数据带，省空间但是会有错误几率）
//	private DisCompetitionColection disCompetitionColection;
//	public PageCollects( UrlCollection urlCollection, RegexUtil regexUtil,
//			DisCompetitionColection disCompetitionColection) {
//		super();
//		this.urlCollection = urlCollection;
//		this.regexUtil = regexUtil;
//		this.disCompetitionColection = disCompetitionColection;
//	}
//	public  void getPageEntity () throws ClientProtocolException, IOException, InterruptedException{
//		if(urlCollection.isEmpty()){
//			wait();
//			System.out.println("没有准备好");
//			return ;
//		}else{
//			String url =urlCollection.getUrl();
//			if(urlCollection.getSize()>5){
//				notifyAll();
//			}
////			logger.info("需要爬"+url);
//			linkWebForScrath(url);
//		}
//	}
//	private void linkWebForScrath(String url) {
//		httpclient =HttpClients.createDefault();
//		HttpGet httpGet;
//		try {
//			httpGet = new HttpGet(url);
//		} catch (Exception e1) {
//			// TODO Auto-generated catch block
//			e1.printStackTrace();
//			return;
//		}
//		CloseableHttpResponse response =null;
//		HttpEntity entity =null;
//		try {
//			response=httpclient.execute(httpGet);
//			System.out.println(response.getStatusLine());
//			entity = response.getEntity();
//			if(entity==null){
//				System.out.println("被屏蔽了嘛？");
//			}
//			this.handlePageContent(entity);
//			EntityUtils.consume(entity);
//		} catch (Exception e) {
//			// TODO Auto-generated catch block
//			e.printStackTrace();
//		}finally{
//			try {
//				response.close();
//			} catch (IOException e) {
//				// TODO Auto-generated catch block
//				e.printStackTrace();
//			}
//		}
//	}
//	public  void handlePageContent(HttpEntity httpEntity) throws UnsupportedOperationException, IOException{
//		InputStream inputStream =httpEntity.getContent();
//		if(inputStream.available()==0){
//			System.out.println("屏蔽类");
//		}
//		BufferedReader bis =new BufferedReader(new InputStreamReader(inputStream, "GBK"));
//		Scanner scanner =new Scanner(bis);
//		while(scanner.hasNext()){
//			String result = scanner.nextLine();
////			System.out.println(result);
//			Pattern pattern =regexUtil.getPattern();
//			Matcher matcher = pattern.matcher(result);
//			if(matcher.find()&&!disCompetitionColection.ifDispetion(matcher.group("url"))){
//				/*disCompetitionColection.addDisCompetitionColection(matcher.group("url"));*/
//				urlCollection.addCollects(matcher.group("url"));
//				System.out.println(matcher.group("url"));
//			}
//		}
//	}
//	//href="http://news.163.com/16/1111/15/C5JOLIE10001875N.html?f=bj_news#loc=13"
//	//http://news.163.com/photoview/3R710001/29491.html#p=8GVN6DFN3R710001
	public static void main(String[] args) {
////		String url =URLDecoder.decode("\u7528\u6253\u9ebb\u5c06\u7684\u7cbe\u795e\u53bb\u5de5\u4f5c\uff0c\u8fd9\u4e16\u4e0a\u6050\u6015\u5c31\u6ca1\u6709\u4ec0\u4e48\u5e72\u4e0d\u597d\u7684\u5de5\u4f5c\u4e86\u3002");
////		System.out.println(url);
//		UrlCollection urlSet = new UrlCollection();
//		DisCompetitionColection dcc =new DisCompetitionColection();
//		RegexUtil regexUtil = null;
////		RegexUtil regexUtil = new RegexUtil("\\<a href=(\"(?<url>http://news.163.com.+?)\")+?.+\\>.+?\\</a\\>");
//		urlSet.addCollects("http://news.163.com");
//		PageCollects pc =new PageCollects(urlSet,regexUtil,dcc);
//		ThreadPool threadPool = new ThreadPool(10);
//		for(int i=0;i<10;i++){
//			threadPool.submitWorker(new MyThread(pc));
//		}
String a ="[0xab][0x82][0x97]?[0xb8]n[0xa2][0xbf][0x88][0xfb][0x94][0xb7][0xd2]@[0x1a]A.Al[0xe5][0xc]\"[0xa7][0xf6]4[0xef][0x10]R&[0x11][0x81]([0xa4][0xb3]yY![0xcf][0x83]R[0xb0][0x9b][0x97][0xa4][0xee][0x80]\\[0x8c][0xf8][0xb7]y[0x19][0x9f]n[0x1f][0xc][0x84][0xae][0xb4][0xfc][0x8c][0xc5][0xad][0xb0]/[0xd8]U[0xf3][0xb2][0xe8][0xe3]h[0xd7][0x96]\\[0x83][0xf2]$Z[0xc9][0x8]T[0xba][0x96][0x7]y[0x87]<[0xc8]G[0x13][0xf4][0xff]4[0xf5][0xd8]&[0xbb]1iP[0x93][0x1b][0x8e][0x88][0xe0][0xde][0xb1][0x86][0x0][0xd2][0xe7]|[0xbc]A.[0xca]L[0xdb]![0xa0][0xd5][0xde]][0x4]2[0xb7]T[0xee][0xa8][0x8a][0xf]w[0x1c]q[0xa7]9PV[0xb4][0x98][0xe5][0xaa][0x99][0x83][0x8][0x13][0x15]i[0x9a]d![0x86]h[0xf0]kA[0x1][0xa2][0x91];[0xd4][0xb7][0x1b][0xe9])[0x98]Xl[0xc0][0x12][0x1e][0xca]MX7![0xb2][0xb0][0xbd][0xe9][0x9e]g[0xd0][0xd6]x[0xf8][0x12][0xa9]%[0xa6][0xdf][0xb0][0xc3]&[0xde][0xdf][0xdb][0xb7][\\n]";
	}
}
////如果不分源码没有下,说明maven没有下那个，从maven dependencies和pom.xml里中找 没有下载的包 
////URLDecoder.decode 把传来的unicode转成中文
////注意组成和继承
