package com.chance.cc.crawler.development.command.job.domain.bbs;

import com.alibaba.fastjson.JSON;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerJob;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterUtils;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.development.command.job.CrawlerJobCommand;
import com.chance.cc.crawler.meta.core.bean.CrawlerMetaConstant;
import com.chance.cc.crawler.meta.core.bean.job.CrawlerScheduleJob;
import org.apache.commons.lang3.StringUtils;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.turnPage;

/**
 * Publisher for the scheduled crawl job of the "梧桐树" (Wutong Tree) BBS site.
 *
 * <p>Builds a {@link CrawlerJob} definition for the {@code bbs} domain and
 * pushes it to the crawler admin service as a {@link CrawlerScheduleJob}.
 *
 * @author Zhao.Hhuan
 * @since 2020/12/14
 */
public class BBSCrawlerScheduleJob {

    /** Domain identifier for this crawler. Public and mutable for callers that read/override it. */
    public static String domainId = "bbs";

    /** Entry URL the crawl starts from. */
    private static final String START_URL = "http://www.zju1.com/";

    /** Client for the crawler admin endpoint. NOTE(review): host/port are hard-coded — consider externalizing. */
    private static final CrawlerJobCommand CRAWLER_JOB_COMMAND =
            new CrawlerJobCommand("192.168.1.215", 9599);

    public static void main(String[] args) {
        publishCrawlerScheduleJobInfo();
    }

    /**
     * Builds the BBS crawl job and publishes it to the crawler admin service.
     *
     * <p>Side effect: performs a remote call via
     * {@code CrawlerJobCommand#addOrUpdateCrawlerScheduleJob} and prints the raw
     * response to stdout.
     *
     * @return the {@link CrawlerJob} that was serialized and published
     */
    public static CrawlerJob publishCrawlerScheduleJobInfo() {
        CrawlerJob bbsCrawlerSchedule = bbsCrawlerSchdule(domainId);

        // Wrap the job definition in a schedule record and publish it.
        CrawlerScheduleJob crawlerScheduleJob = new CrawlerScheduleJob();
        crawlerScheduleJob.setDomain(domainId);
        crawlerScheduleJob.setCrawlerJob(JSON.toJSONString(bbsCrawlerSchedule));
        crawlerScheduleJob.setJobType(CrawlerMetaConstant.ScheduleCrawlerJobType.crawler.enumVal());
        crawlerScheduleJob.setNote("梧桐树定时采集");
        crawlerScheduleJob.setCrawlerKey(bbsCrawlerSchedule.generateCrawlerKey());

        HttpPage page = CRAWLER_JOB_COMMAND.addOrUpdateCrawlerScheduleJob(crawlerScheduleJob);
        System.out.println("发布crawler作业：" + page.getRawText());
        return bbsCrawlerSchedule;
    }

    /**
     * Assembles the {@link CrawlerJob} definition for the BBS domain: a
     * turn-page start request on {@code START_URL}, filtered by key (Redis) or
     * by date range, feeding a Redis request queue with file and Kafka result
     * pipelines.
     *
     * <p>Method name keeps the original "Schdule" spelling for backward
     * compatibility with existing callers.
     *
     * @param domainId domain identifier used for queue/filter naming and the trigger
     * @return the fully built crawl job definition (not yet published)
     */
    public static CrawlerJob bbsCrawlerSchdule(String domainId) {
        // No explicit recordKey is set, so the URL itself is used as the
        // de-duplication key (per CrawlerRequestRecord's documented default).
        CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                .startPageRequest(domainId, turnPage)
                .httpUrl(START_URL)
                .releaseTime(System.currentTimeMillis())
                .filter(CrawlerEnum.CrawlerRecordFilter.keyOrDateRange)
                .addFilterInfo(FilterUtils.redisFilterKeyInfo(domainId))
                // presumably 24 * 5 means a 5-day window in hours — TODO confirm
                // against FilterUtils.dateRangeFilterInfo's unit contract.
                .addFilterInfo(FilterUtils.dateRangeFilterInfo(24 * 5, null))
                .build();

        return CrawlerJob.builder()
                .triggerInfo(
                        domainId,
                        CrawlerMetaConstant.ScheduleJobTrigger_Cron,
                        System.currentTimeMillis(),
                        CrawlerMetaConstant.ScheduleJobTriggerJob_Realtime)
                // Request queue name: "crawler-<domainId>-queue".
                .crawlerRequestQueue(CrawlerMetaConstant.redisRequestQueue(
                        StringUtils.joinWith("-", "crawler", domainId, "queue")))
                .fileResultPipeline(null, "/data/chance_crawler_test/data/bbs/bbs.log", false)
                .kafkaResultPipeline(null, "tmp_news", null)
                .requestRecord(requestRecord)
                .build();
    }

}
