package dhy.com.quarz;

import dhy.com.clawer.BaiduPictureProcess;
import dhy.com.clawer.PicturePilepine;
import dhy.com.doamin.ClawerBean;

import org.quartz.*;
import org.springframework.scheduling.quartz.QuartzJobBean;

import us.codecraft.webmagic.Spider;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;


/**
 * Quartz job that crawls one page of Baidu image search results per firing and
 * stores the downloaded pictures in a local folder via {@link PicturePilepine}.
 *
 * <p>Crawl state is carried in the job's {@link JobDataMap} under two keys:
 * {@code "clawerBean"} (keyword, current page, optional end page) and
 * {@code "filename"} (target folder). Each successful run advances the page
 * counter by one; when the end page is exceeded, the job unschedules itself.
 */
public class PictureClawerJob extends QuartzJobBean
{
    @Override
    protected void executeInternal(JobExecutionContext jobExecutionContext) throws JobExecutionException {
        // Crawl parameters are passed through the job's data map by the scheduler setup code.
        JobDataMap jobDataMap = jobExecutionContext.getJobDetail().getJobDataMap();

        ClawerBean clawerBean = (ClawerBean) jobDataMap.get("clawerBean");
        String filename = (String) jobDataMap.get("filename");
        String keyWord = clawerBean.getKeyWord();
        Integer pageNum = clawerBean.getPageNum();
        // Default a missing page number to 1 instead of NPE-ing on unboxing below.
        int currentPage = (pageNum == null) ? 1 : pageNum;
        System.out.println("当前爬取页: "+currentPage);

        // Stop crawling once the user-specified last page has been passed.
        if (clawerBean.getEndPageNum() != null && currentPage > clawerBean.getEndPageNum())
        {
            // Unschedule this job so the timer stops firing.
            try {
                System.out.println("达到最大限制页面数量，停止爬取.....");
                QuartzUtils.deleteScheduleJob(jobExecutionContext.getScheduler(),
                        jobExecutionContext.getTrigger().getJobKey().getName());
                // BUG FIX: the original assigned the *local* `pageNum = 1`, which had no
                // effect. Reset the counter on the bean and write it back to the data map
                // so a future reschedule starts from page 1 again.
                clawerBean.setPageNum(1);
                jobDataMap.put("clawerBean", clawerBean);
            } catch (SchedulerException e) {
                e.printStackTrace();
            }
            return;
        }

        // URL-encode the keyword so non-ASCII (e.g. Chinese) search terms form a valid URL;
        // the original concatenated the raw keyword into the query string.
        String encodedKeyWord;
        try {
            encodedKeyWord = URLEncoder.encode(keyWord, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is mandated by the JVM spec, so this branch is effectively unreachable.
            throw new JobExecutionException("UTF-8 encoding unavailable", e);
        }

        // Crawl one page of Baidu image search and pipe the pictures into the local folder.
        // NOTE(review): Baidu's `pn` parameter is usually a result *offset* (with rn=30 per
        // page), not a page index — confirm whether it should be `currentPage * 30` here.
        Spider spider = Spider.create(new BaiduPictureProcess())
                .addUrl("https://image.baidu.com/search/acjson?tn=resultjson_com&logid=10660754132115598609&ipn=rj&ct=201326592&is=&fp=result&" +
                        "queryWord=" + encodedKeyWord + "&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&hd=&latest=&copyright=&" +
                        "word=" + encodedKeyWord + "&s=&se=&tab=&width=&height=&face=&istype=&qc=&nc=&fr=&expermode=&force=&" +
                        "pn=" + (currentPage) + "&rn=30&gsm=5a&1606053649620")
                .thread(5)
                .addPipeline(new PicturePilepine(filename));
        spider.run();
        // Advance to the next page and persist the state for the next firing.
        clawerBean.setPageNum(currentPage + 1);
        jobDataMap.put("clawerBean", clawerBean);
    }
}
