package com.zhucx.utils;


import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Minimal single-page web spider: downloads the cncn.com front page and
 * extracts every on-site link whose anchor text is Chinese, then (in
 * {@link #main}) de-duplicates the links with a {@code BloomFilter}.
 */
public class UrlSpiderUtil {

    /** Seed page to crawl. */
    private static final String url = "https://www.cncn.com/";

    /**
     * Matches {@code <a href="...">} tags pointing at cncn.com whose link text
     * is entirely CJK (Chinese) characters; group 1 captures the URL.
     *
     * <p>Compiled once and cached — {@link Pattern} is immutable and
     * thread-safe. Fixes two bugs in the original expression:
     * {@code [(https)|(http)]+} was a <em>character class</em> (matching any
     * run of the letters h/t/p/s and the punctuation), not an alternation —
     * {@code https?} is the intended scheme match — and the dots in
     * {@code www.cncn.com} are now escaped so they no longer match arbitrary
     * characters.
     */
    private static final Pattern URL_PATTERN = Pattern.compile(
            "<a href=\"(https?://www\\.cncn\\.com/[^\"]+)\"[^>]+>[\\u4e00-\\u9fa5]+</a>");

    public static void main(String[] args) throws Exception {
        String[] urls = urlSpider(url);
        BloomFilter filter = new BloomFilter();
        int num = 0;
        for (String s : urls) {
            if (!filter.contain(s)) {
                System.out.println(s);
                num++;
            }
        }
        System.out.println("用布隆算法去重后该页面url的数量为:"+num);
    }

    /**
     * Downloads the page at {@code urlAddress} and returns every cncn.com
     * link found in it (see {@link #URL_PATTERN}).
     *
     * @param urlAddress absolute URL of the page to fetch
     * @return the extracted URLs, in document order; empty array if none match
     * @throws Exception on any connection or read failure
     */
    public static String[] urlSpider(String urlAddress) throws Exception {
        URL pageUrl = new URL(urlAddress);
        URLConnection connection = pageUrl.openConnection();
        // Spoof a browser User-Agent so the server does not reject the request.
        connection.addRequestProperty(
                "User-Agent", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36");
        // Buffer is local (the original used a static field, so a second call
        // would re-scan the first page's HTML as well). NOTE(review): charset
        // GBK assumed from the original code — presumably matches the site's
        // encoding; confirm before changing.
        StringBuffer page = new StringBuffer();
        // try-with-resources closes the reader on every path; the original
        // leaked it. readLine() returns null at end of stream, hence the
        // null check rather than calling length() on the result.
        try (BufferedReader br = new BufferedReader(
                new InputStreamReader(connection.getInputStream(), "GBK"))) {
            String line;
            while ((line = br.readLine()) != null) {
                page.append(line);
            }
        }
        return getUrl(page);
    }

    /**
     * Extracts all URLs matching {@link #URL_PATTERN} from the given HTML.
     *
     * @param sb the page HTML to scan
     * @return captured URLs in match order; empty array if none
     */
    public static String[] getUrl(StringBuffer sb) {
        // Collect into a list, then convert once — the original grew the
        // array with Arrays.copyOf on every match, which is O(n^2).
        List<String> urls = new ArrayList<>();
        Matcher urlMatcher = URL_PATTERN.matcher(sb);
        while (urlMatcher.find()) {
            urls.add(urlMatcher.group(1));
        }
        return urls.toArray(new String[0]);
    }
}
