package com.zc.study.java爬虫.netCase;


import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;


/**
 * Minimal crawler for https://www.huxiu.com/: collects article links from the
 * front page, fetches each article's title and body via Jsoup, and writes one
 * UTF-8 text file per article under {@link #FILE_PATH}.
 */
public class Case1 {

    /** Output directory for the scraped articles (Windows path). */
    public static final String FILE_PATH = "E:\\article\\";

    public static void main(String[] args) throws IOException {
        Map<String, String> strMap = new HashMap<>();
        String url = "https://www.huxiu.com/";
        // Collect all article URLs linked from the main page.
        Set<String> urls = getUrlList(url);
        urls.forEach(urlEntry -> {
            try {
                getArticleInfo(urlEntry, strMap);
            } catch (IOException e) {
                // Keep crawling other URLs, but don't discard the failure context.
                System.out.println("url io exception: " + urlEntry + " (" + e.getMessage() + ")");
            }
        });
        // Persist every title -> content pair as a local file.
        strMap.forEach(Case1::writerIntoFile);
    }

    /**
     * Writes one article to {@code FILE_PATH} as "&lt;timestamp&gt;&lt;title&gt;.txt".
     *
     * @param k article title; characters illegal in Windows file names are replaced
     * @param v article body text, encoded as UTF-8
     */
    private static void writerIntoFile(String k, String v) {
        // Page titles routinely contain characters that are illegal in Windows
        // file names (\ / : * ? " < > |) — replace them so file creation succeeds.
        String safeTitle = k.replaceAll("[\\\\/:*?\"<>|]", "_");
        String fileName = System.currentTimeMillis() + safeTitle + ".txt";
        File file = new File(FILE_PATH + fileName);
        // Ensure the target directory exists; FileOutputStream will not create it.
        File parent = file.getParentFile();
        if (parent != null && !parent.exists() && !parent.mkdirs()) {
            System.out.println("cannot create directory: " + parent);
            return;
        }
        // FileOutputStream creates (or truncates) the file itself, so the old
        // exists()/createNewFile() dance is unnecessary.
        try (FileOutputStream fos = new FileOutputStream(file)) {
            // Encode explicitly as UTF-8 instead of the platform default charset;
            // note this is a byte length, not a character count.
            byte[] bytes = v.getBytes(StandardCharsets.UTF_8);
            // BUG FIX: the original wrote the content twice — write(bytes, 0, len)
            // followed by write(bytes) — duplicating every article in its file.
            fos.write(bytes);
            // No explicit close(): try-with-resources handles it.
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Fetches one article page and records its title/body into {@code strMap}.
     *
     * @param url    absolute article URL
     * @param strMap accumulator mapping title -> body text
     * @throws IOException if the page cannot be fetched
     */
    private static void getArticleInfo(String url,
            Map<String, String> strMap) throws IOException {
        // Step 1: fetch the page.
        Document document = Jsoup.connect(url).get();
        // Step 2: parse it. Guard against pages without a <title> element,
        // which would otherwise throw IndexOutOfBoundsException.
        Elements titleElements = document.getElementsByTag("title");
        if (titleElements.isEmpty()) {
            return;
        }
        String title = titleElements.get(0).text();
        String content = document.select("div #article-content").text();
        // Step 3: record the result keyed by title.
        strMap.put(title, content);
    }

    /**
     * Scrapes the front page and returns the absolute URLs of all article links.
     *
     * @param url front-page URL to scan
     * @return set of absolute article URLs (hrefs on the page are site-relative)
     * @throws IOException if the page cannot be fetched
     */
    private static Set<String> getUrlList(String url) throws IOException {
        Document document = Jsoup.connect(url).get();
        Elements select = document.select("div .article-item a");
        Set<String> setUrl = new HashSet<>();
        select.forEach(entry -> setUrl.add("https://www.huxiu.com" + entry.attr("href")));
        return setUrl;
    }

}
