package cn.wit.test;
/**
 * A minimal web-crawler demo: downloads a single page and, via {@code isURL},
 * extracts URLs from its text with a regular expression.
 * @author 16604
 *
 */

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DemoCrawler {

	// Matches http/https/ftp/ftps URLs: scheme, host (dotted word segments),
	// and an optional path/query fragment of URL-safe characters.
	private static final String PATH_URL = "((ht|f)tps?):\\/\\/[\\w\\-]+(\\.[\\w\\-]+)+([\\w\\-\\.,@?^=%&:\\/~\\+#]*[\\w\\-\\@?^=%&\\/~\\+#])?";

	// Compiled once: the regex is constant and Pattern.compile is expensive,
	// so isURL should not recompile it on every call.
	private static final Pattern URL_PATTERN = Pattern.compile(PATH_URL);

	/**
	 * Downloads a fixed Baidu Wenku page and prints every line of its content
	 * to standard output. Uncomment the {@code isURL(line)} call below to print
	 * only the URLs found on each line instead of the raw content.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		// try-with-resources guarantees the stream and reader are closed even
		// when an exception is thrown mid-read (the original leaked both).
		// NOTE(review): the page is decoded as gb2312 as in the original —
		// confirm this matches the server's actual charset.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(
						new URL("https://wenku.baidu.com/view/6a4ea39577a20029bd64783e0912a21615797f47.html").openStream(),
						"gb2312"))) {

			// Read and print the page one line at a time.
			String line;
			while ((line = br.readLine()) != null) {
				System.out.println(line);
//				isURL(line);
			}

		} catch (IOException e) {
			// MalformedURLException is a subclass of IOException, so a single
			// catch block covers both the URL parse and the network/read errors.
			e.printStackTrace();
		}
	}

	/**
	 * Scans a line of text and prints every URL it contains, one per line.
	 * Lines without a URL produce no output.
	 *
	 * @param line the text to scan; must not be {@code null}
	 */
	public static void isURL(String line) {
		Matcher matcher = URL_PATTERN.matcher(line);
		while (matcher.find()) {
			System.out.println(matcher.group());
		}
	}

}
