package com.chance.cc.crawler.development.scripts.guoke;

import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;

import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

public class GuoKrScript extends CrawlerCommonScript {
    private static final String DOMAIN = "guokr";
    private static final String SITE = "news";
    private static final String START_URL_REGEX = "https://www.guokr.com/";
    private static final String NEXT_PAGE_REGEX = "https://www.guokr.com/beta/proxy/science_api/articles\\?limit=16&page=\\S*";
    private static final String FIRSTPAGE_REGEX = "https://www.guokr.com/beta/proxy/science_api/articles\\?limit=16&page=1";
    private static final String FIRSTPAGE_URL = "https://www.guokr.com/beta/proxy/science_api/articles?limit=16&page=1";
    private static final String DETAILTS_REGEX = "https://www.guokr.com/article/\\S*";

    private static final String NEXT_PAGE_FORMAT = "https://www.guokr.com/beta/proxy/science_api/articles?limit=16&page=%s";

    @Override
    public String domain() {
        return DOMAIN;
    }

    @Override
    public void initUrlRegulars() {
        addUrlRegular(START_URL_REGEX);
        addUrlRegular(NEXT_PAGE_REGEX);
        addUrlRegular(FIRSTPAGE_REGEX);
        addUrlRegular(DETAILTS_REGEX);
    }

    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String site = categoryTag.getKVTagStrVal("site");
        return SITE.equalsIgnoreCase(site);
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parseLinks = new ArrayList<>();
        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        if(url.matches(START_URL_REGEX))
            parseStartPage(crawlerRequestRecord,httpPage,parseLinks);
        if(url.matches(FIRSTPAGE_REGEX))
            parseNextPage(crawlerRequestRecord,httpPage,parseLinks);
        if(url.matches(NEXT_PAGE_REGEX))
            parseDetails(crawlerRequestRecord,httpPage,parseLinks);
        return parseLinks;
    }
    public void parseStartPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,List<CrawlerRequestRecord> parseLinks){
        CrawlerRequestRecord startRecord = CrawlerRequestRecord.builder()
                .itemPageRequest(crawlerRequestRecord)
                .recordKey(FIRSTPAGE_URL)
                .httpUrl(FIRSTPAGE_URL)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .needWashed(false)
                .build();
        parseLinks.add(startRecord);
    }
    public void parseNextPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,List<CrawlerRequestRecord> parseLinks){
        String urlCurrent = crawlerRequestRecord.getHttpRequest().getUrl();
        Object page = getUrlParams(urlCurrent).get("page");
        int pageNext = 0;
        if(page != null)  pageNext = Integer.parseInt((String)page)+1;
        String nextUrl = String.format(NEXT_PAGE_FORMAT,pageNext);
        CrawlerRequestRecord nextRecord = CrawlerRequestRecord.builder()
                .turnPageRequest(crawlerRequestRecord)
                .httpUrl(nextUrl)
                .recordKey(nextUrl)
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .needWashed(false)
                .build();
        parseLinks.add(nextRecord);

    }
    public void parseDetails(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,List<CrawlerRequestRecord> parseLinks){
        Json json = httpPage.getJson();
        int id_count = json.jsonPath($_type + "[*].id").all().size();
        for(int i = 0;i < id_count; i++){//1 --460480无效
            String articleId = json.jsonPath($_type + "[" + i + "].id").get();
            String article_url = String.format("https://www.guokr.com/article/%s",articleId);
            String publish_date = json.jsonPath($_type+"["+i+"].date_published").get();
            DateFormat df2 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
            Long news_published_date = null;
            try {
                news_published_date = df2.parse(publish_date).getTime();
            } catch (ParseException e) {
                e.printStackTrace();
            }
            CrawlerRequestRecord detailsRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(article_url)
                    .recordKey(article_url)
                    .releaseTime(news_published_date)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            detailsRecord.getHttpRequest().addExtra("published_date",news_published_date.toString());
            detailsRecord.getHttpRequest().addExtra("articleId",articleId);
            parseLinks.add(detailsRecord);
        }
    }
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRequestRecord.tagsCreator().resultTags();
        if(crawlerResultTags.hasDataType(article))
            crawlerDataList.add(washArticle(crawlerRequestRecord,httpPage));
        if(crawlerResultTags.hasDataType(interaction))
            crawlerDataList.add(washInteraction(crawlerRequestRecord,httpPage));
        return crawlerDataList;
    }
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage){
        String url = httpPage.getRequest().getUrl();
        String articleKey = url.substring(url.lastIndexOf("/") + 1);
        String author = httpPage.getHtml().xpath("//p[@class=\"nickname\"]//text()").get();
        String title = httpPage.getHtml().xpath("//div[@class=\"layout__Skeleton-zgzfsa-3 styled__ArticleTitle-sc-1ctyfcr-0 ejzYGj\"]/text()").get();
        List<String> contents = httpPage.getHtml().xpath("//div[@id=\"js_content\"]/section/section//text()").all();
        StringBuffer content = new StringBuffer();
        StringBuffer images = new StringBuffer();
        String published_date = (String) crawlerRequestRecord.getHttpRequest().getExtras().get("published_date");
        for (String con : contents) {
            content.append(con);
        }
        List<String> imgList = httpPage.getHtml().xpath("//img[@src]/@src").all();
        for(int i = 0;i<imgList.size()-1;i++){//最后一张图片链接为头像，不属于文章的一部分
            if(imgList.get(i).length()<100)//排除一个很长的链接，不是图片
                images.append(imgList.get(i)).append("\\x01");
        }
        CrawlerData articleData = CrawlerData.builder()
                .data(crawlerRequestRecord,httpPage)
                .dataId(StringUtils.joinWith("-",crawlerRequestRecord.getDomain(),article,articleKey))
                .releaseTime(Long.valueOf(published_date))
                .addContentKV(Field_Author,removeBlank(author))
                .addContentKV(Field_Title,removeBlank(title))
                .addContentKV(Field_Content,removeBlank(content.toString()))
                .addContentKV(Field_Images,removeBlank(images.toString()))
                .resultLabelTag(article)
                .build();
        return articleData;
    }
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord,HttpPage httpPage){
        String likes = httpPage.getHtml().xpath("//span[@class=\"styled__Count-myecm7-1 gWfOpe\"]/text()").get();
        String articleId = (String) crawlerRequestRecord.getHttpRequest().getExtras().get("articleId");
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRequestRecord,httpPage)
                .dataId(StringUtils.joinWith("-",crawlerRequestRecord.getDomain(),interaction,articleId))
                .releaseTime(System.currentTimeMillis())
                .addContentKV(Field_I_Likes,likes==null?"0":likes)
                .resultLabelTag(interaction)
                .build();
        return crawlerData;
    }
    private String removeBlank(String blank){
        if(StringUtils.isNotEmpty(blank))
            return blank.replace(" ","");
        else
            return blank;
    }
    //解析url的参数
    private Map<String, Object> getUrlParams(String url) {
        Map<String, Object> map = new HashMap<String, Object>(0);
        String param = url;
        if (url.contains("?")) {
            param = url.split("\\?")[1];
        }
        if (StringUtils.isBlank(param)) {
            return null;
        }
        String[] params = param.split("&");
        for (String s : params) {
            String[] p = s.split("=");
            if (p.length == 2) {
                map.put(p[0], p[1]);
            }
        }
        return map;
    }

}
