package com.chance.cc.crawler.development.scripts.ifeng.auto;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterInfo;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Crawler script for the ifeng.com auto channel (https://auto.ifeng.com/):
 * seeds the channel's section pages, extracts article links from the embedded
 * "allData" JSON, and pages through the "getColumnInfo3" JSONP list API.
 *
 * @Author songding
 * @Date 2021/9/26 13:35
 * @Version 1.0
 **/
public class IFengAutoCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(IFengAutoCrawlerScript.class);

    private static final String domain = "ifeng";
    private static final String site = "ifengAuto";

    /** Channel home page; also registered verbatim as a URL "regular". */
    private static final String ifengUrl = "https://auto.ifeng.com/";
    /** Matches the channel's section pages, e.g. https://auto.ifeng.com/xinche/. */
    private static final String ifengRegular = "https://auto.ifeng.com/\\S*/";
    /** Matches the paginated JSONP list API. */
    private static final String listRegular = "https://shankapi.ifeng.com/shanklist/getColumnInfo3/\\S*";

    /** Next-page URL template: args are last item id, last item timestamp, current millis. */
    private static final String listUrl = "https://shankapi.ifeng.com/shanklist/getColumnInfo3/%s/%s/20/10-602-/getColumnInfoCallback?callback=getColumnInfoCallback&_=%s";

    /** Date pattern of the "newsTime" field in ifeng responses. */
    private static final String NEWS_TIME_PATTERN = "yyyy-MM-dd HH:mm:ss";

    /** Compiled once (was re-compiled per page); group(1) is the embedded "var allData = {...};" JSON. */
    private static final Pattern ALL_DATA_PATTERN = Pattern.compile("var allData = (\\{.*\\});");

    /** Maximum number of re-download attempts before a failed page is abandoned. */
    private static final int MAX_DOWNLOAD_RETRIES = 10;

    @Override
    public void initUrlRegulars() {
        addUrlRegular(ifengUrl);
        addUrlRegular(ifengRegular);
        addUrlRegular(listRegular);
    }

    /**
     * Routes a downloaded page to the matching parser based on its URL and
     * returns the follow-up requests discovered on it.
     *
     * @param crawlerRecord the request that produced this page
     * @param page          the downloaded page
     * @return follow-up requests (never null; empty when nothing was found)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();
        if (page.getStatusCode() != 200 || !page.isDownloadSuccess()) {
            log.error("download error or page != 200  code={}", page.getStatusCode());
            this.recordAgainDownload(crawlerRecord, page, parseLinks);
            // Bug fix: previously fell through and tried to parse the failed page.
            return parseLinks;
        }

        String url = crawlerRecord.getHttpRequest().getUrl();
        // NOTE(review): the two constant URLs are used as regexes; '.' matches any
        // char, which is tolerated here because real ifeng URLs match literally.
        if (url.matches(ifengUrl)) {
            this.getPlateUrl(crawlerRecord, page, parseLinks);
        }
        if (url.matches(ifengRegular)) {
            this.getParseLinks(crawlerRecord, page, parseLinks);
        }
        if (url.matches(listRegular)) {
            this.getArticle(crawlerRecord, page, parseLinks);
        }
        return parseLinks;
    }

    /**
     * Parses a JSONP response of the "getColumnInfo3" list API and emits
     * article requests plus a next-page request.
     */
    private void getArticle(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String rawText = page.getRawText();
        String[] split = rawText.split("getColumnInfoCallback\\(");
        if (split.length < 2) {
            // Guard: unguarded split[1] previously threw ArrayIndexOutOfBoundsException.
            log.error("unexpected JSONP payload for url={}", crawlerRecord.getHttpRequest().getUrl());
            return;
        }
        // Strip the trailing ')' of the JSONP wrapper.
        String payload = split[1].substring(0, split[1].length() - 1);
        JSONObject body = JSONObject.parseObject(payload);
        // Constant-first equals: message may be absent/null in error responses.
        if (!"成功".equals(body.getString("message"))) {
            log.error("获取错误");
            return;
        }
        JSONObject data = body.getJSONObject("data");
        processNewsStream(crawlerRecord, data == null ? null : data.getJSONArray("newsstream"), parseLinks);
    }

    /**
     * Extracts the "var allData = {...};" JSON embedded in a section page and
     * emits article requests plus a next-page request per match.
     */
    private void getParseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        Matcher mtData = ALL_DATA_PATTERN.matcher(page.getRawText());
        while (mtData.find()) {
            // group(1) is the captured {...}; replaces the old split/trim/substring dance.
            JSONObject allData = JSONObject.parseObject(mtData.group(1));
            if (!processNewsStream(crawlerRecord, allData.getJSONArray("newsstream"), parseLinks)) {
                // Preserve original semantics: stop processing further matches
                // once an item falls outside the configured date range.
                return;
            }
        }
    }

    /**
     * Shared core of {@link #getArticle} and {@link #getParseLinks} (previously
     * ~50 duplicated lines): emits one article request per news item and, when
     * every item passed the date filter, a request for the next list page
     * keyed by the last (oldest) item.
     *
     * @return false when the stream is empty/missing or an item fell outside
     *         the date range — the caller should stop paging
     */
    private boolean processNewsStream(CrawlerRequestRecord crawlerRecord, JSONArray newsstream, List<CrawlerRequestRecord> parseLinks) {
        if (newsstream == null || newsstream.isEmpty()) {
            // Guard: get(size()-1) below previously threw on an empty array.
            return false;
        }
        for (Object obj : newsstream) {
            JSONObject item = asJson(obj);
            long realtime = parseNewsTime(item.getString("newsTime"));
            if (!isDateRange(crawlerRecord, realtime)) {
                return false;
            }
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .releaseTime(realtime)
                    .httpUrl(item.getString("url"))
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            record.tagsCreator().bizTags().addSite("SeriesArticle");
            record.tagsCreator().bizTags().addCustomKV("seriesId", item.getString("id"));
            parseLinks.add(record);
        }
        // Next page: the API pages by the id and timestamp of the oldest item seen.
        JSONObject last = asJson(newsstream.get(newsstream.size() - 1));
        long lastTime = parseNewsTime(last.getString("newsTime"));
        String nextUrl = String.format(listUrl, last.getString("id"), lastTime, System.currentTimeMillis());
        parseLinks.add(CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(nextUrl)
                .releaseTime(lastTime)
                .copyBizTags()
                .copyResultTags()
                .build());
        return true;
    }

    /**
     * Normalizes a fastjson array element to a JSONObject. Bug fix: the old
     * unconditional {@code (String)} cast threw ClassCastException whenever
     * fastjson had already materialized the element as a JSONObject.
     */
    private static JSONObject asJson(Object element) {
        if (element instanceof JSONObject) {
            return (JSONObject) element;
        }
        return JSONObject.parseObject(String.valueOf(element));
    }

    /**
     * Parses ifeng's "yyyy-MM-dd HH:mm:ss" timestamp to epoch millis;
     * returns 0L when absent or unparseable (matching the old default).
     */
    private static long parseNewsTime(String newsTime) {
        if (newsTime == null) {
            return 0L;
        }
        try {
            return DateUtils.parseDate(newsTime, NEWS_TIME_PATTERN).getTime();
        } catch (ParseException e) {
            // Was e.printStackTrace(); keep the cause in the log instead.
            log.error("unparseable newsTime: {}", newsTime, e);
            return 0L;
        }
    }

    /*
     * Seed the three channel sections: 新车 (new cars), 试驾 (test drives),
     * 导购 (buying guides).
     */
    private void getPlateUrl(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        for (String section : new String[]{"xinche/", "shijia/", "daogou/"}) {
            this.toRecord(crawlerRecord, ifengUrl + section, parseLinks);
        }
    }

    /** Builds a plain follow-up request for {@code url} and appends it to {@code parseLinks}. */
    private void toRecord(CrawlerRequestRecord crawlerRecord, String url, List<CrawlerRequestRecord> parseLinks) {
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(url)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        parseLinks.add(record);
    }

    /**
     * This script only harvests links; washing is handled by the article
     * script. NOTE(review): returning null appears to be the framework's
     * "nothing washed" convention — confirm before changing to an empty list.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        return null;
    }

    /** Accepts only records tagged with this script's site. */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRecord) {
        // Constant-first equals avoids an NPE when the record carries no site tag.
        return site.equals(crawlerRecord.tagsCreator().bizTags().site());
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {
        // Intentionally empty: no per-record cleanup is needed.
    }

    @Override
    public String domain() {
        return domain;
    }

    /**
     * Re-queues a failed download, tracking the attempt count in the "count"
     * biz tag and giving up after {@link #MAX_DOWNLOAD_RETRIES} attempts.
     * The attempt number is appended to the record key so the retry is not
     * deduplicated against the original request.
     */
    private void recordAgainDownload(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parseLinks) {
        String count = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("count");
        int retry;
        if (count == null) {
            retry = 1;
        } else {
            retry = Integer.parseInt(count);
            if (retry >= MAX_DOWNLOAD_RETRIES) {
                log.error("The number of downloads exceeds the limit");
                return;
            }
            retry += 1;
        }
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRecord)
                .httpUrl(crawlerRecord.getHttpRequest().getUrl())
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .recordKey(crawlerRecord.getRecordKey() + retry)
                .copyResultTags()
                .build();
        record.tagsCreator().bizTags().addCustomKV("count", retry);
        parseLinks.add(record);
    }

    /**
     * Checks whether {@code releaseTimeToLong} falls inside the record's
     * configured date filter. Records without a date filter always pass.
     *
     * @param crawlerRequestRecord record carrying the filter configuration
     * @param releaseTimeToLong    article release time in epoch millis (0 = unknown)
     * @return true when in range (or no date filter applies)
     */
    private boolean isDateRange(CrawlerRequestRecord crawlerRequestRecord, Long releaseTimeToLong) {
        CrawlerEnum.CrawlerRecordFilter filter = crawlerRequestRecord.getFilter();
        if (filter != CrawlerEnum.CrawlerRecordFilter.keyOrDateRange
                && filter != CrawlerEnum.CrawlerRecordFilter.dateRange) {
            return true;
        }
        Long startTime = null;
        Long endTime = null;
        for (FilterInfo filterInfo : crawlerRequestRecord.getFilterInfos()) {
            if (filterInfo.getFilter() != CrawlerEnum.CrawlerRecordFilter.dateRange) {
                continue;
            }
            long[] dateAllowRange = filterInfo.getDateAllowRange();
            int hourFromNow = filterInfo.getHourFromNow();
            if (dateAllowRange != null) {
                startTime = dateAllowRange[0];
                endTime = dateAllowRange[1];
            } else if (hourFromNow != 0) {
                endTime = System.currentTimeMillis() - 60000; // now minus one minute
                startTime = endTime - DateUtils.MILLIS_PER_HOUR * hourFromNow;
            }
        }
        return startTime != null && releaseTimeToLong != 0
                && startTime <= releaseTimeToLong && releaseTimeToLong <= endTime;
    }
}
