package com.zc.study.java爬虫.netCase;


import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.jsoup.Jsoup;
import org.jsoup.helper.HttpConnection;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;


/**
 * Simple jsoup crawler demo: collects article links from the huxiu.com
 * front page, then fetches each article and prints its title and body text.
 */
public class Case1Datasource {

    public static void main(String[] args) throws IOException {
        //String url = "https://www.huxiu.com/article/311470.html";
        String url = "https://www.huxiu.com/";
        // Step 1: collect all article URLs linked from the main page.
        Set<String> urls = getUrlList(url);
        urls.forEach(urlEntry -> {
            try {
                getArticleInfo(urlEntry);
            } catch (IOException e) {
                // Keep crawling the remaining URLs, but record which one
                // failed and why instead of discarding the cause.
                System.out.println("url io exception: " + urlEntry + " (" + e.getMessage() + ")");
            }
        });
    }

    /**
     * Fetches a single article page and prints its title and body text.
     *
     * @param url absolute URL of the article page
     * @throws IOException if the page cannot be fetched
     */
    private static void getArticleInfo(String url) throws IOException {
        // Step 1: fetch the page via the public Jsoup API
        // (was org.jsoup.helper.HttpConnection — an internal helper class).
        Document document = Jsoup.connect(url).get();
        // Step 2: parse. document.title() is safe on pages without a
        // <title> tag (returns ""), unlike getElementsByTag(...).get(0).
        String title = document.title();
        // "div#article-content" (no space): a div WITH that id; the original
        // "div #article-content" was a descendant selector and matched nothing
        // when the id sits on the div itself.
        Elements elements = document.select("div#article-content");
        String content = elements.text();
        // Step 3: print the result.
        System.out.println("title:" + title);
        System.out.println("content:" + content);
    }

    /**
     * Collects the article links from the given listing page.
     *
     * @param url the listing page to scan (huxiu.com front page)
     * @return absolute article URLs (relative hrefs prefixed with the site root)
     * @throws IOException if the page cannot be fetched
     */
    private static Set<String> getUrlList(String url) throws IOException {
        Document document = Jsoup.connect(url).get();
        Elements select = document.select("div .article-item a");
        Set<String> setUrl = new HashSet<>();
        // Single pass: record the absolute URL and echo the raw href for debugging.
        select.forEach(entry -> {
            String href = entry.attr("href");
            setUrl.add("https://www.huxiu.com" + href);
            System.out.println(href);
        });
        return setUrl;
    }

}
