package cerebrum.crawler;

import cerebrum.common.EmptyChecker;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Simple image-URL crawlers for a couple of free stock-photo sites.
 *
 * <p>Each method fetches a page via {@code CrawlerUtil.webCrawlerHTTP}, extracts
 * image URLs with jsoup selectors, and returns them as a list. Crawling is
 * best-effort: on an {@link IOException} the failure is logged and an empty
 * (or partial) list is returned instead of propagating the error.
 */
public class ImagesCrawler {

    private static final Logger LOG = Logger.getLogger(ImagesCrawler.class.getName());

    /** Demo entry point: crawl the Life of Pix landing page and print the found image URLs. */
    public static void main(String[] args) {
        String url = "https://www.lifeofpix.com/";
        System.out.println(lifeofpixHtmlAnalysis(url));
    }

    /**
     * Crawls image URLs from https://www.lifeofpix.com/.
     *
     * @param url page URL to fetch and scan
     * @return all image URLs found inside the page's anchor elements;
     *         empty list if the page could not be fetched
     */
    public static List<String> lifeofpixHtmlAnalysis(String url) {
        List<String> imagesUrl = new ArrayList<>();
        try {
            Document document = CrawlerUtil.webCrawlerHTTP(url);
            // Image links on this site live inside anchor markup, so scan
            // each <a> element's raw HTML for embedded image URLs.
            for (Element element : document.select("a")) {
                List<String> images = CrawlerUtil.getImages(element.toString());
                if (EmptyChecker.notEmpty(images)) {
                    imagesUrl.addAll(images);
                }
            }
        } catch (IOException e) {
            // Best-effort crawl: log with cause instead of printStackTrace()
            // and fall through to return whatever was collected (empty here,
            // since the fetch is the only IOException source).
            LOG.log(Level.WARNING, "Failed to crawl " + url, e);
        }
        return imagesUrl;
    }

    /**
     * Crawls image URLs from https://www.splitshire.com/.
     *
     * @param url page URL to fetch and scan
     * @return the {@code data-guid} attribute of each matching gallery element;
     *         empty list if the page could not be fetched
     */
    public static List<String> splitshireHtmlAnalysis(String url) {
        List<String> urls = new ArrayList<>();
        try {
            Document document = CrawlerUtil.webCrawlerHTTP(url);
            Elements elements = document.select(".t-entry-visual-cont a .adaptive-async");
            for (Element element : elements) {
                // Lazy-loaded thumbnails keep the real image address in data-guid.
                urls.add(element.attr("data-guid"));
            }
        } catch (IOException e) {
            LOG.log(Level.WARNING, "Failed to crawl " + url, e);
        }
        return urls;
    }
}
