package com.yanjiali.webmagic;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.scheduler.BloomFilterDuplicateRemover;
import us.codecraft.webmagic.scheduler.QueueScheduler;

import java.util.List;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;


/**
 * @Package: com.yanjiali.webmagic
 * @Author: yanjiali
 * @Created: 2025/3/10 22:53
 * 爬虫获取数据
 */
@Component
public class GetDataSource implements PageProcessor {

    @Autowired
    private BingPipeline bingPipeline;  // Spring-managed pipeline that persists/handles the extracted data

    /** Pool of User-Agent strings; one is picked at random per crawl to vary the request fingerprint. */
    private static final String[] USER_AGENTS = {
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/92.0.902.67"
    };

    /** Target search endpoint; hard-coded for now — TODO: extract into a strategy/factory for other engines. */
    private static final String SEARCH_URL = "https://www.bing.com/search?q=";

    /**
     * Crawler configuration. Refreshed at the start of every {@link #getData(String)}
     * call so the User-Agent and sleep time are re-randomized per crawl. (Previously
     * they were randomized only once, when the bean was constructed, so every crawl
     * reused the same "random" values.)
     */
    private Site site = buildSite();

    /**
     * Public entry point: crawls the Bing result page for the given query and
     * feeds the extracted items through {@code bingPipeline}.
     *
     * @param query search term appended to the Bing search URL
     */
    public void getData(String query) {
        site = buildSite();  // re-randomize User-Agent and delay for this crawl
        Spider.create(this)  // use the Spring-managed bean; a `new GetDataSource()` would have no injected pipeline
                .addUrl(SEARCH_URL + query)
                .setScheduler(new QueueScheduler()
                        .setDuplicateRemover(new BloomFilterDuplicateRemover(10000)))  // Bloom-filter URL dedup
                .addPipeline(bingPipeline)
                .run();
    }

    /**
     * Extraction logic: collects the raw HTML of every Bing result item
     * (elements matching {@code li.b_algo}) into the page's result map
     * under the key {@code "dataList"}.
     *
     * @param page the downloaded page
     */
    @Override
    public void process(Page page) {
        List<String> dataList = page.getHtml().css("li.b_algo").all();
        page.putField("dataList", dataList);
    }

    /**
     * Returns the site settings.
     *
     * @return site
     * @see Site
     */
    @Override
    public Site getSite() {
        return site;
    }

    /** Builds a {@link Site} with a freshly randomized User-Agent and inter-request delay. */
    private Site buildSite() {
        return Site.me()
                .addHeader("User-Agent", getRandomUserAgent())
                .addHeader("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8")
                .addHeader("Accept-Language", "en-US,en;q=0.5")
                .addHeader("Referer", "https://www.bing.com/")
                .addHeader("DNT", "1")
                .addHeader("Upgrade-Insecure-Requests", "1")
                .setCharset("UTF-8")
                .setTimeOut(10000)
                .setRetryTimes(3)
                .setSleepTime(getRandomDelay()); // random delay between requests
    }

    /**
     * Picks a random User-Agent from the pool. Uses {@link ThreadLocalRandom}
     * instead of allocating a new {@link Random} on every call.
     *
     * @return a randomly chosen User-Agent string
     */
    private String getRandomUserAgent() {
        return USER_AGENTS[ThreadLocalRandom.current().nextInt(USER_AGENTS.length)];
    }

    /**
     * Random inter-request delay in the half-open range [1000, 3000) milliseconds.
     *
     * @return delay in milliseconds
     */
    private int getRandomDelay() {
        return ThreadLocalRandom.current().nextInt(2000) + 1000;
    }
}
