package com.spiden.bazaspiden.processor;

import com.spiden.bazaspiden.pipeline.FarmerPipeline;
import com.spiden.bazaspiden.webmagic.selenium.NewSeleniumDownloader;
import org.springframework.stereotype.Component;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.selector.Html;

import java.util.List;

/**
 * Crawls China Farmer News (farmer.com.cn): discovers article pages whose URLs
 * follow the date-based pattern and extracts title/content fields, which are
 * persisted downstream by {@link FarmerPipeline}.
 *
 * @author 张高昌
 * @date 2024/1/9 10:22
 */
@Component
public class FarmerPageProcessor implements PageProcessor {

    // Crawl configuration: retry 3 times, sleep 1s between requests.
    // Built eagerly as a final field so getSite() is thread-safe — Spider may
    // call it from multiple worker threads, and the original lazy init was racy.
    private final Site site = Site.me().setRetryTimes(3).setSleepTime(1000);

    /**
     * Extracts article links from the current page, queues them for crawling,
     * and stores the title/content of article pages for the pipeline.
     *
     * @param page the downloaded page handed over by the Spider
     */
    @Override
    public void process(Page page) {
        Html html = page.getHtml();
        // Article URLs look like https://www.farmer.com.cn/2024/01/09/12345678.html.
        // Dots in the host are escaped so the pattern cannot match look-alike domains
        // (an unescaped '.' matches any character).
        List<String> articleLinks = html.links()
                .regex("https://www\\.farmer\\.com\\.cn/\\d+/\\d+/\\d+/\\d+\\.html")
                .all();
        page.addTargetRequests(articleLinks);

        // Store extracted fields on the page so the Pipeline can persist them.
        // NOTE(review): absolute /html/body/div[5]/... XPaths are brittle — any site
        // layout change silently breaks extraction; prefer id/class-based selectors
        // if the markup offers them.
        page.putField("title", html.xpath("/html/body/div[5]/div[2]/div[2]/div[1]").toString());
        page.putField("content", html.xpath("/html/body/div[5]/div[2]/div[2]/div[4]").toString());
    }

    /** @return the crawl configuration (encoding, retry count, request interval). */
    @Override
    public Site getSite() {
        return site;
    }

    /**
     * Standalone entry point: starts the spider from the news list page using a
     * Selenium-backed downloader, since the list content is rendered by JavaScript.
     */
    public static void main(String[] args) {
        Spider.create(new FarmerPageProcessor())
                // Seed URL: the "agriculture headlines" news list page.
                .addUrl("https://www.farmer.com.cn/farmer/xw/sntt/list.shtml")
                .setDownloader(new NewSeleniumDownloader("D:\\Program Files (x86)\\chromedriver_win32\\chromedriver.exe").setSleepTime(2000))
                .addPipeline(new FarmerPipeline()).run();
    }
}
