package com.chance.cc.crawler.development.command.job.domain.xhs.daily;

import com.alibaba.fastjson.JSON;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerJob;
import com.chance.cc.crawler.core.downloader.HttpConstant;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequestBody;
import com.chance.cc.crawler.core.downloader.proxy.Proxy;
import com.chance.cc.crawler.core.filter.FilterUtils;
import com.chance.cc.crawler.core.queue.crawler.CrawlerQueueConfig;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.development.command.job.CrawlerJobCommand;
import com.chance.cc.crawler.meta.core.bean.CrawlerMetaConstant;
import com.chance.cc.crawler.meta.core.bean.job.CrawlerScheduleJob;
import org.apache.commons.lang3.StringUtils;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerRequestType.*;
import static com.chance.cc.crawler.development.command.publish.douyin.DYPublishCrawlerJob.metaServerIP;
import static com.chance.cc.crawler.development.command.publish.douyin.DYPublishCrawlerJob.metaServerPort;

/**
 * Builds and publishes the XHS (Xiaohongshu) real-time comment crawler schedule job.
 *
 * <p>The job consumes article ids produced by {@link XHSArticleCrawlerScheduleJob}, initializes a
 * real-time comment request queue via the meta server, and pushes crawled comments to a Kafka
 * result pipeline plus an article-accumulation queue.
 *
 * @author bx
 * @date 2020/12/6 0006 12:44
 */
public class XHSRealTimeCommentCrawlerScheduleJob {

    // NOTE(review): intentionally left non-final in case other jobs assign them at runtime
    // (sibling jobs expose the same mutable-public-static pattern); confirm before making final.
    public static String domainId = "xhs";
    public static String site = "xhs-comment";

    // Template start URL; "start" segments act as placeholders filled in downstream.
    public static final String commentStartUrl = "https://start/fe_api/burdock/weixin/v2/notes/start/comments/start";

    private static CrawlerJobCommand crawlerJobCommand = new CrawlerJobCommand(metaServerIP, metaServerPort);

    private static final String metaServer = "http://192.168.1.215:9599";
//    private static final String metaServer = "http://localhost:9599";

    private static Proxy proxy = new Proxy();

    static {
        // Proxy configuration.
        // SECURITY(review): proxy credentials are hard-coded in source control; move them to a
        // secure configuration source (env vars / secret store) and rotate the exposed values.
        proxy.setHost("http-dyn.abuyun.com");
        proxy.setPort(9020);
        proxy.setUsername("HL89Q19E86E2987D");
        proxy.setPassword("71F33D94CE5F7BF2");
    }

    public static void main(String[] args) {
//        publishSearchCrawlerScheduleJobInfo();
        // Dry run: print the trigger key without publishing the job.
        System.out.println(crawlerSchduler().generateCrawlerTriggerKey());
    }

    /**
     * Publishes the comment crawler as a scheduled crawl job on the meta server.
     *
     * @return the {@link CrawlerJob} definition that was published
     */
    public static CrawlerJob publishSearchCrawlerScheduleJobInfo() {

        CrawlerJob crawlerSchduler = crawlerSchduler();

        // Publish the scheduled crawl job.
        CrawlerScheduleJob crawlerScheduleJob = new CrawlerScheduleJob();
        crawlerScheduleJob.setDomain(domainId);
        crawlerScheduleJob.setCrawlerJob(JSON.toJSONString(crawlerSchduler));
        crawlerScheduleJob.setJobType(CrawlerMetaConstant.ScheduleCrawlerJobType.crawler.enumVal());
        crawlerScheduleJob.setNote("小红书评论定时采集");
        crawlerScheduleJob.setCrawlerKey(crawlerSchduler.generateCrawlerKey());
        HttpPage page = crawlerJobCommand.addOrUpdateCrawlerScheduleJob(crawlerScheduleJob);
        System.out.println("发布crawler作业：" + page.getRawText());
        return crawlerSchduler;
    }

    /**
     * Assembles the {@link CrawlerJob} for real-time XHS comment crawling.
     *
     * <p>NOTE(review): the method name contains a typo ("Schduler"); it is kept as-is because
     * external callers may reference it by name.
     *
     * @return the fully configured crawler job (trigger, queues, pipelines, support records)
     */
    public static CrawlerJob crawlerSchduler() {

        // Main comment-page request: raw JSON responses (no HTML generation), not washed/parsed
        // here, deduplicated via a Redis key filter, and routed through the proxy.
        CrawlerRequestRecord crawlerCommentRequestRecord = CrawlerRequestRecord.builder()
                .startPageRequest(domainId, turnPage)
                .httpUrl(commentStartUrl)
                .releaseTime(System.currentTimeMillis())
                .needWashed(false)
                .needParsed(false)
                .filter(CrawlerEnum.CrawlerRecordFilter.key)
                .addFilterInfo(FilterUtils.redisFilterKeyInfo(StringUtils.joinWith("-", "filter", domainId, site, "queue")))
                .proxy(proxy)
                .build();
        crawlerCommentRequestRecord.getHttpConfig().setResponseTextGenerateHtml(false);
        crawlerCommentRequestRecord.setDownload(false);
        crawlerCommentRequestRecord.setSkipPipeline(true);

        crawlerCommentRequestRecord.tagsCreator().bizTags().addDomain(domainId);
        crawlerCommentRequestRecord.tagsCreator().bizTags().addSite(site);

        String requestQueueName = StringUtils.joinWith("-", "crawler", domainId, site, "queue");

        // Result queue shared with the article job: accumulates results keyed as a map.
        String articleAccumQueueName = XHSArticleCrawlerScheduleJob.articleAccumQueue;
        CrawlerQueueConfig crawlerQueueConfig = CrawlerMetaConstant.redisRequestQueue(articleAccumQueueName);
        crawlerQueueConfig.setQueueContent(CrawlerQueueConfig.Content.result);
        crawlerQueueConfig.setQueueStructure(CrawlerQueueConfig.Structure.map);

        // Support record: fetches XHS user OAuth infos from the meta server (internal download).
        CrawlerRequestRecord userOauthInfoCrawlerRecord = CrawlerRequestRecord.builder()
                .startPageRequest("xhs_user_oauth_infos", turnPageItem)
                .httpUrl(metaServer + "/crawler/oauth/api/v1/xhs/userOauthInfos")
                .requestLabelTag(supportSource)
                .requestLabelTag(internalDownload)
                .build();

        // Template record serialized into the init request body below; unlike the main record,
        // this one is marked for washing.
        CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                .startPageRequest(domainId, turnPage)
                .httpUrl(commentStartUrl)
                .releaseTime(System.currentTimeMillis())
                .needWashed(true)
                .proxy(proxy)
                .filter(CrawlerEnum.CrawlerRecordFilter.key)
                .addFilterInfo(FilterUtils.redisFilterKeyInfo(StringUtils.joinWith("-", "filter", domainId, site, "queue")))
                .build();
        requestRecord.tagsCreator().bizTags().addDomain(domainId);
        requestRecord.tagsCreator().bizTags().addSite(site);

        // Support record: POSTs the template record to the meta server to initialize the
        // real-time comment queue, wiring article-id, comment, and accumulation queues together.
        String initUrl = metaServer + "/crawler/domain/common/api/v1/xhs/realtime/comment/queue/init?" +
                "crawlerArticleIdsQueueName=%s&crawlerCommentQueueName=%s&articleAccumQueueName=%s";
        CrawlerRequestRecord initRealTimeCommentCrawlerRecord = CrawlerRequestRecord.builder()
                .startPageRequest("xhs_comment_init", turnPageItem)
                .httpUrl(String.format(initUrl, XHSArticleCrawlerScheduleJob.articleIdsQueue, requestQueueName, articleAccumQueueName))
                .requestLabelTag(supportSource)
                .requestLabelTag(internalDownload)
                .build();
        HttpRequestBody jsonBody = HttpRequestBody.json(JSON.toJSONString(requestRecord), "utf-8");
        initRealTimeCommentCrawlerRecord.getHttpRequest().setMethod(HttpConstant.Method.POST);
        initRealTimeCommentCrawlerRecord.getHttpRequest().setRequestBody(jsonBody);

        return CrawlerJob.builder()
                .triggerInfo(
                        domainId,
                        CrawlerMetaConstant.ScheduleJobTrigger_Cron,
                        System.currentTimeMillis(),
                        StringUtils.joinWith("-", domainId, site, "realtime", CrawlerMetaConstant.ScheduleJobTriggerJob_Realtime))
                .crawlerJobThreadNumber(9) // Must match the number of WeChat accounts; never exceed it.
                .crawlerRequestQueue(CrawlerMetaConstant.redisRequestQueue(requestQueueName))
                .kafkaResultPipeline("article_comment", "tmp_social_media", null)
                .queueResultPipeline("article_accumulate", crawlerQueueConfig)
                .requestRecord(crawlerCommentRequestRecord)
                .supportRecord(userOauthInfoCrawlerRecord)
                .supportRecord(initRealTimeCommentCrawlerRecord)
                .restWhileNon(false)
                .build();
    }
}
