package com.chance.cc.crawler.development.scripts.autohome.article;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpConfig;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.filter.FilterUtils;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.tags.CategoryTag;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

/**
 * @author lt
 * @version 1.0
 * @date 2021-01-28 16:16:55
 * @email okprog@sina.com
 */
public class AutoHomeSyncArticleUrlsScript extends CrawlerCommonScript {

    private Logger logger = LoggerFactory.getLogger(AutoHomeSyncArticleUrlsScript.class);

    public static final String indexRegex = "https?://www\\.autohome\\.com\\.cn/#article_urls";
    private static final String scriptSite = "article_urls";

    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String a = crawlerRecord.getHttpRequest().getUrl();
        logger.info(a);
        CategoryTag categoryTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag();
        String domain = categoryTag.getKVTagStrVal("domain");
        String site = categoryTag.getKVTagStrVal("site");
        String filePath = categoryTag.getKVTagStrVal("filePath");
        try {
            List<String> urlLines = IOUtils.readLines(new FileInputStream(filePath));
            int count = 0;
            for (String urlLine : urlLines) {
                JSONObject urlObj = JSONObject.parseObject(urlLine);
                String url = urlObj.getString("链接");
                String nextSite = "";
                if (url.matches("https?://www\\.autohome\\.com\\.cn/\\w*/\\S*/\\S*\\.html")){
                    count ++;
                    if (!url.startsWith("https")){
                        url = url.replace("http","https");
                    }
                    nextSite = "article";
                }
                if (StringUtils.isBlank(nextSite)){
                    continue;
                }
                CrawlerRequestRecord crawlerRequestRecord = CrawlerRequestRecord.builder()
                        .startPageRequest(domain, CrawlerEnum.CrawlerRequestType.turnPageItem)
                        .httpUrl(url)
                        .recordKey(url)
                        .releaseTime(System.currentTimeMillis())
                        .httpConfig(HttpConfig.me(domain))
                        .filter(CrawlerEnum.CrawlerRecordFilter.key)
                        .addFilterInfo(FilterUtils.memoryFilterKeyInfo(domain))
                        .needParsed(true)
                        .needWashed(true)
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                        .resultLabelTag(CrawlerEnum.CrawlerDataType.interaction)
                        .build();

                crawlerRequestRecord.tagsCreator().bizTags().addDomain(domain);
                crawlerRequestRecord.tagsCreator().bizTags().addSite(nextSite);
                parsedLinks.add(crawlerRequestRecord);
            }
            logger.info("init " + domain + " " + site + " total: " + count);
        }catch (Exception e){
            logger.error(e.getMessage(),e);
        }
        return parsedLinks;
    }

    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CategoryTag categoryTag = crawlerRecord.tagsCreator().bizTags().getCategoryTag();
        String domain = categoryTag.getKVTagStrVal("domain");
        String site = categoryTag.getKVTagStrVal("site");
        String filePath = categoryTag.getKVTagStrVal("filePath");
        try {
            List<String> urlLines = IOUtils.readLines(new FileInputStream(filePath));
            int count = 0;
            for (String urlLine : urlLines) {
                JSONObject urlObj = JSONObject.parseObject(urlLine);
                String url = urlObj.getString("链接");
                if (url.matches("https?://www\\.autohome\\.com\\.cn/\\w*/\\S*/\\S*\\.html")){
                    count ++;

                    if (!url.startsWith("https")){
                        url = url.replace("http","https");
                    }
                    String key = url.substring(url.lastIndexOf("/") + 1).split("\\.")[0];
                    Map<String,String> contentMap = new HashMap<>();
                    contentMap.put("url",url);

                    CrawlerData crawlerData = CrawlerData.builder()
                            .data(crawlerRecord,page)
                            .dataId(StringUtils.joinWith("-",domain,site,System.currentTimeMillis()))
                            .releaseTime(System.currentTimeMillis())
                            .build();

                    crawlerData.setCrawlerContent(JSON.toJSONString(contentMap));
                    crawlerDataList.add(crawlerData);
                }
            }
            logger.info("init " + domain + " " + site + " total: " + count);
            return crawlerDataList;
        }catch (Exception e){
            logger.error(e.getMessage(),e);
        }
        return null;
    }

    @Override
    public void initUrlRegulars() {
        addUrlRegular(indexRegex);
    }

    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        return crawlerSite.equalsIgnoreCase(scriptSite);
    }

    @Override
    public void afterExecute(CrawlerRecordContext context) {

    }

    @Override
    public String domain() {
        return "autohome";
    }
}
