package com.chance.cc.crawler.development.scripts.shgov;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.*;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;

/**
 * Crawler script for the Shanghai municipal government portal (www.sh.gov.cn).
 *
 * <p>Crawl flow: the home page seeds one list request per configured channel;
 * each list page yields a next-page request plus article detail requests; each
 * detail page is washed into a {@link CrawlerData} article record.
 */
public class ShgovCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(ShgovCrawlerScript.class);

    /** Site root; relative hrefs from list pages are resolved against it. */
    private static final String BASE_URL = "http://www.sh.gov.cn";

    /** Exact home-page address — the seed request that fans out to the channel lists. */
    private static final String HOME_URL = BASE_URL + "/";

    /** List (index / turn) pages, e.g. http://www.sh.gov.cn/nw15343/index_2.html */
    private static final String LIST_URL_REGEX = "http://www\\.sh\\.gov\\.cn/\\S*/\\S*\\.html";

    /** Article detail pages, e.g. http://www.sh.gov.cn/nw15343/20200101/detail.html */
    private static final String ITEM_URL_REGEX = "http://www\\.sh\\.gov\\.cn/\\S*/\\d*/\\S*\\.html";

    /** Channel ids on the portal whose index pages are crawled. */
    private static final List<String> CHANNELS = Arrays.asList("nw15343", "nw31406", "nw4411", "nw18454");

    /** Pattern used to parse the "yyyy-MM-dd" publish dates shown on the site. */
    private static final String DATE_PATTERN = "yyyy-MM-dd";

    @Override
    public String domain() {
        return "shgov";
    }

    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        // All records are accepted; no per-record filtering is needed for this site.
        return true;
    }

    @Override
    public void initUrlRegulars() {
        // Dots in the host and the ".html" suffix are escaped so the patterns
        // match the literal URLs only (the originals used bare "." any-char).
        addUrlRegular("http://www\\.sh\\.gov\\.cn/");
        addUrlRegular(LIST_URL_REGEX);
        addUrlRegular(ITEM_URL_REGEX);
    }

    /**
     * Extracts follow-up requests from a fetched page: channel list seeds from
     * the home page; next-page and article requests from every list page.
     *
     * @param crawlerRequestRecord the record that produced {@code httpPage}
     * @param httpPage             the downloaded page to parse
     * @return the newly discovered requests (possibly empty, never null)
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = httpPage.getRequest().getUrl();
        // Exact comparison instead of the original's unescaped-regex matches().
        if (HOME_URL.equals(requestUrl)) {
            addChannelSeeds(crawlerRequestRecord, parsedLinks);
        } else if (requestUrl.matches(LIST_URL_REGEX)) {
            addNextPageRequest(crawlerRequestRecord, requestUrl, parsedLinks);
            addItemRequests(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /** Seeds one "index.html" list request per configured channel. */
    private void addChannelSeeds(CrawlerRequestRecord parent, List<CrawlerRequestRecord> out) {
        for (String channel : CHANNELS) {
            String listUrl = BASE_URL + "/" + channel + "/index.html";
            out.add(CrawlerRequestRecord.builder()
                    .itemPageRequest(parent)
                    .recordKey(listUrl)
                    .httpUrl(listUrl)
                    .releaseTime(System.currentTimeMillis())
                    .build());
        }
    }

    /**
     * Builds the request for the following list page. Page n is addressed as
     * "index_n.html"; the first page has no suffix, so its successor is "index_2.html".
     *
     * <p>NOTE(review): the original fetched the "whj_nextPage" button but its
     * null check was commented out, so a next-page request is emitted even on
     * the last page. That behavior is kept; confirm whether unbounded
     * pagination is intended or the check should be restored.
     */
    private void addNextPageRequest(CrawlerRequestRecord parent, String requestUrl, List<CrawlerRequestRecord> out) {
        String nextPageUrl;
        if (requestUrl.contains("_")) {
            // "…/index_3.html" -> page 3 -> "…/index_4.html"
            int pageNum = Integer.parseInt(requestUrl.substring(requestUrl.lastIndexOf('_') + 1).split("\\.")[0]);
            nextPageUrl = requestUrl.substring(0, requestUrl.lastIndexOf('_') + 1) + (pageNum + 1) + ".html";
        } else {
            // "…/index.html" (first page) -> "…/index_2.html"
            nextPageUrl = requestUrl.substring(0, requestUrl.lastIndexOf('/') + 1) + "index_2.html";
        }
        out.add(CrawlerRequestRecord.builder()
                .turnPageRequest(parent)
                .recordKey(nextPageUrl)
                .httpUrl(nextPageUrl)
                .releaseTime(System.currentTimeMillis())
                .build());
    }

    /** Extracts article detail requests (with their publish dates) from the list items. */
    private void addItemRequests(CrawlerRequestRecord parent, HttpPage httpPage, List<CrawlerRequestRecord> out) {
        List<Selectable> itemNodes = httpPage.getHtml().xpath("//div[@class=\"row list-content\"]/div/ul/li").nodes();
        for (Selectable node : itemNodes) {
            String href = node.xpath("a/@href").get();
            // Check the raw href BEFORE prefixing with BASE_URL: the original
            // tested isBlank() after concatenation, which could never be true.
            if (StringUtils.isBlank(href)) {
                continue;
            }
            String itemUrl = BASE_URL + href;
            if (!itemUrl.matches(ITEM_URL_REGEX)) {
                continue;
            }
            String pubTime = node.xpath("./span[@class=\"time\"]/text()").get();
            try {
                long releaseTime = DateUtils.parseDate(pubTime, DATE_PATTERN).getTime();
                out.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(parent)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .needParsed(false)
                        .releaseTime(releaseTime)
                        .resultLabelTag(article)
                        .build());
            } catch (ParseException e) {
                // Skip items with an unparseable date; log instead of printStackTrace().
                log.error("unparseable publish time '{}' for {}", pubTime, itemUrl, e);
            }
        }
    }

    /**
     * Routes a fetched page to the matching washer(s) based on its result tags.
     * Null washer results (parse failures / unimplemented washers) are NOT
     * added — the original put nulls into the returned list.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();
        if (crawlerResultTags.hasDataType(article)) {
            CrawlerData articleData = washArticle(crawlerRecord, page);
            if (articleData != null) {
                crawlerDataList.add(articleData);
            }
        }
        if (crawlerResultTags.hasDataType(interaction)) {
            CrawlerData interactionData = washInteraction(crawlerRecord, page);
            if (interactionData != null) {
                crawlerDataList.add(interactionData);
            }
        }
        return crawlerDataList;
    }

    /**
     * Extracts an article (title, body text, source, release time) from a
     * detail page.
     *
     * @return the washed article, or {@code null} when the page lacks a
     *         release-time element or the date cannot be parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            String releaseTime = httpPage.getHtml().xpath("//small[@class=\"Article-time\"]/text()").get();
            if (releaseTime == null || releaseTime.length() < 10) {
                // The original threw an uncaught NPE here on pages without a
                // time element; skip the page instead.
                log.warn("missing release time on {}", httpPage.getRequest().getUrl());
                return null;
            }
            releaseTime = releaseTime.substring(0, 10); // keep the "yyyy-MM-dd" prefix

            String source = httpPage.getHtml().xpath("//small[@class=\"Article-time\"]/span/text()").get();
            if (source != null) {
                // Drop the label before the full-width colon, keep the value after it.
                source = source.substring(source.indexOf("：") + 1);
            }

            List<String> texts = httpPage.getHtml().xpath("//div[@id=\"ivs_content\"]/p/text()").all();
            String content = String.join(" ", texts).trim();

            String itemUrl = httpPage.getRequest().getUrl();
            // The second-to-last path segment is used as the stable article id.
            String[] segments = itemUrl.split("/");
            String articleKey = segments[segments.length - 2];

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(),
                            CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, DATE_PATTERN).getTime())
                    .addContentKV("content", content)
                    .addContentKV("title", httpPage.getHtml().xpath("//h2[@class=\"Article-title\"]/text()").get())
                    .addContentKV("source", source)
                    .build();
        } catch (ParseException e) {
            log.error(e.getMessage(), e);
        }
        return null;
    }

    /** Interaction washing is not implemented for this site; always returns null. */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        return null;
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // No post-processing is required for this crawler.
    }
}
