package com.cp.weixin.task;

import com.cp.weixin.crawler.AbstractCrawler;
import com.cp.weixin.crawler.impl.WeixinCrawler;
import org.jsoup.nodes.Document;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import java.time.LocalDateTime;

//@Component
//@Configuration
//@EnableScheduling
public class ScheduleTask {

	@Autowired
	AbstractCrawler abstractCrawler;

	/**
	 * Crawls every result page for a fixed search query.
	 *
	 * <p>Fetches page 1 first to learn the total record count, then walks the
	 * remaining pages. Currently disabled: the scheduling annotations on the
	 * class and on this method are commented out.
	 */
	// @Scheduled(initialDelay = 999999999L, fixedRate = 1000L * 100000)
	private void configureTasks() {
		System.err.println("time: " + LocalDateTime.now());
		// First request: fetch page 1, crawl all links found on it, and make the
		// total record count available for the pagination below.
		// NOTE(review): page 13 was once flagged as bot traffic and had to be
		// re-crawled manually — consider adding retry handling.
		int pageNum = 1;
		String query = "springboot";
		crawlPage(pageNum, query);
		int totalRecord = abstractCrawler.getTotalRecord();
		// 10 records per page, rounding up for a partial last page.
		int pageCount = totalRecord % 10 == 0 ? totalRecord / 10 : (totalRecord / 10 + 1);
		// BUG FIX: the original condition was `page < pageCount`, which skipped
		// the final page (pages are 1-based and page 1 is crawled above).
		for (int page = pageNum + 1; page <= pageCount; page++) {
			crawlPage(page, query);
		}
		System.err.println("************ pass all url ****************");
	}

	// Fetches one result page for the query and crawls its links and details.
	private void crawlPage(int page, String query) {
		abstractCrawler.getDoc(page, query);
		abstractCrawler.getLink();
		abstractCrawler.getDetail();
	}
}
