package com.itbaizhan.crawler;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;

import java.io.File;
import java.io.IOException;
import java.net.URL;

public class crawlerDemo1 {
    public static void main(String[] args) throws IOException {
        // Read the crawl range [min, max] from the bundled Crawler.xml config file.
        URL configUrl = crawlerDemo1.class.getClassLoader()
                .getResource("com/itbaizhan/crawler/Crawler.xml");
        if (configUrl == null) {
            // Fail fast with a clear message instead of an opaque NullPointerException.
            throw new IOException("com/itbaizhan/crawler/Crawler.xml not found on classpath");
        }
        Document config = Jsoup.parse(new File(configUrl.getPath()), "UTF-8");
        int min = parseRequiredInt(config, "min");
        int max = parseRequiredInt(config, "max");

        // Crawl every story page in the configured id range.
        for (int i = min; i <= max; i++) {
            try {
                crawlStory(i);
            } catch (Exception e) {
                // Best-effort per story: keep crawling on failure, but never
                // swallow the error silently — report which id failed and why.
                System.err.println("Failed to crawl story " + i + ": " + e);
            }
        }
    }

    /**
     * Fetches one Zhihu Daily story page and prints its header image URL,
     * title, author and content text to stdout.
     *
     * @param id story id appended to the base URL
     * @throws IOException if the page cannot be fetched or parsed
     */
    private static void crawlStory(int id) throws IOException {
        // 1. Fetch the page as a Document. Jsoup.connect(...).timeout(...).get()
        //    is the documented idiom, replacing the legacy Jsoup.parse(URL, millis).
        Document document = Jsoup.connect("http://daily.zhihu.com/story/" + id)
                .timeout(3000)
                .get();
        // 2. Select the elements of interest.
        Elements headerImgEle = document.getElementsByAttributeValue("alt", "头图");
        Elements titleEle = document.select(".DailyHeader-title");
        Elements authorEle = document.select(".author");
        Elements contentEle = document.select(".content");
        // 3. Print the data. Elements.first() returns null when a selector
        //    matches nothing (e.g. page layout changed) — guard each access
        //    so one missing field doesn't abort the whole story.
        if (headerImgEle.first() != null) {
            System.out.println(headerImgEle.first().attr("src"));
        }
        if (titleEle.first() != null) {
            System.out.println(titleEle.first().text());
        }
        if (authorEle.first() != null) {
            System.out.println(authorEle.first().text());
        }
        if (contentEle.first() != null) {
            System.out.println(contentEle.first().text());
        }
    }

    /**
     * Reads the first {@code <tag>} element's text from the config document
     * and parses it as an int.
     *
     * @param config parsed Crawler.xml document
     * @param tag    element name to look up (e.g. "min" or "max")
     * @return the parsed integer value
     * @throws IOException if the element is missing from the config
     */
    private static int parseRequiredInt(Document config, String tag) throws IOException {
        Elements els = config.select(tag);
        if (els.first() == null) {
            throw new IOException("Missing <" + tag + "> element in Crawler.xml");
        }
        return Integer.parseInt(els.first().text().trim());
    }
}
