package com.ziningmei.crawler;

import org.apache.commons.lang3.StringUtils;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.selector.Html;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.net.URLConnection;
import java.util.List;

/**
 * @author ziningmei
 */
public class Crawler implements PageProcessor {

    // Part 1: site-wide crawl configuration — charset, politeness delay, retry count.
    private Site site = Site.me().setRetryTimes(3).setSleepTime(1000);

    /** Directory downloaded images are written to (created on demand). */
    private static final String SAVE_PATH = "/Users/ziningmei/www/";

    /** Connect/read timeout for image downloads, in milliseconds. */
    private static final int TIMEOUT_MS = 5 * 1000;

    /**
     * Core extraction logic: collects every lazy-loaded image URL from the page
     * ({@code //img/@data-src}) and downloads each image as a PNG.
     *
     * @param page the fetched page supplied by WebMagic
     */
    @Override
    public void process(Page page) {
        Html html = page.getHtml();

        List<String> all = html.xpath("//img/@data-src").all();
        all.stream().filter(StringUtils::isNotEmpty).forEach(src -> {
            // Expected URL shape: .../<imageId>/640?...  — strip from "/640" to
            // recover the image id, which becomes the local file name.
            int cut = src.indexOf("/640");
            if (cut < 0) {
                // URL does not match the expected CDN layout; skip instead of
                // throwing StringIndexOutOfBoundsException on substring(0, -1).
                return;
            }
            String base = src.substring(0, cut);
            String fileName = base.substring(base.lastIndexOf('/') + 1) + ".png";
            try {
                download(src, fileName, SAVE_PATH);
            } catch (Exception e) {
                // Best-effort crawl: report the failure and continue with the
                // remaining images rather than aborting the whole page.
                e.printStackTrace();
            }
        });
    }

    @Override
    public Site getSite() {
        return site;
    }

    public static void main(String[] args) {

        Spider.create(new Crawler())
                // Start crawling from this WeChat article.
                .addUrl("https://mp.weixin.qq.com/s?timestamp=1526311493&src=3&ver=1&signature=VRD4Jbb4n*R*XG0jPPrej8GFN10MMToLc1N4NqM9MbjwS4nE94DaI5v3Qk9algpB4NWDWtIXqz0VnlGGgwcJpUoWHWGS2-Kt6YW9h9eLV820E2k-PZbhSOP86FHnJo6Tux990bpCzfWAMlh5hAoAT6IRgqTqW2H-F3vbzidnDok=")
                // Crawl with 5 worker threads.
                .thread(5)
                // Start the spider.
                .run();
    }

    /**
     * Downloads the resource at {@code urlString} into {@code savePath/filename},
     * creating the target directory if necessary.
     *
     * @param urlString URL of the resource to fetch
     * @param filename  name of the file to write inside {@code savePath}
     * @param savePath  target directory; created (including parents) if absent
     * @throws Exception if the directory cannot be created, the connection
     *                   fails, or reading/writing the stream fails
     */
    public static void download(String urlString, String filename, String savePath) throws Exception {
        URL url = new URL(urlString);
        URLConnection con = url.openConnection();
        con.setConnectTimeout(TIMEOUT_MS);
        // Fix: without a read timeout a stalled server could hang this worker
        // thread forever once the connection was established.
        con.setReadTimeout(TIMEOUT_MS);

        File dir = new File(savePath);
        // Fix: surface a clear error when directory creation fails instead of
        // letting FileOutputStream throw a confusing FileNotFoundException.
        if (!dir.exists() && !dir.mkdirs()) {
            throw new IOException("Could not create directory: " + dir);
        }

        // Fix: try-with-resources guarantees both streams are closed even when
        // read/write throws; the original leaked them on any I/O error.
        try (InputStream in = con.getInputStream();
             OutputStream out = new FileOutputStream(new File(dir, filename))) {
            byte[] buffer = new byte[1024];
            int len;
            while ((len = in.read(buffer)) != -1) {
                out.write(buffer, 0, len);
            }
        }
    }

}
