package com.chance.cc.crawler.development.scripts.foodmate.tag;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.downloader.HttpRequest;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.CategoryTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;

/**
 * Crawler script for news.foodmate.net keyword search.
 * <p>
 * Flow: support-source records provide keywords ({@link #prepareRequest}), each
 * keyword is expanded into a time-sorted search URL, list pages are parsed into
 * article requests plus a next-page request ({@link #parseLinks}), and article
 * pages are washed into {@link CrawlerData} ({@link #washPage}). Failed
 * downloads are re-queued up to {@link #MAX_RETRY_COUNT} times.
 */
public class FoodMateCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(FoodMateCrawlerScript.class);

    /** Entry page of the crawl (tag listing). */
    public static final String beginUrl = "http://news.foodmate.net/tag_1399.html";

    /** Regex of a search-result page sorted by time (first page, no page param). */
    public static final String sortByTimeUrl = "http://news.foodmate.net/search.php\\?kw=\\S*&catid=0&order=2";

    /** Regex of a bare search page without sort/paging parameters. */
    public static final String nothingUrl = "http://news.foodmate.net/search.php\\?kw=\\S*";

    /** Regex of a time-sorted search-result page carrying an explicit page number. */
    public static final String sortByTimeNextUrl = "http://news.foodmate.net/search.php\\?kw=\\S*&catid=0&order=2&page=\\d*";

    /** Regex of an article detail page. */
    public static final String articleUrl = "http://news.foodmate.net/\\d{4}/\\d{2}/\\d*.html";

    public static final String SITE = "search";
    private static final String DOMAIN = "foodmate";

    /** Business-tag key that counts how often a failed download has been retried. */
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";

    /** Maximum number of re-download attempts before a failed request is dropped. */
    private static final int MAX_RETRY_COUNT = 10;

    /** Release-time pattern shown on the search list pages. */
    private static final String LIST_TIME_PATTERN = "yyyy-MM-dd HH:mm";

    /** Paging stops once the newest entry on a page is older than this window. */
    private static final long ONE_WEEK_MILLIS = 7L * 24 * 60 * 60 * 1000;

    @Override
    public String domain() {
        // Keep in sync with the DOMAIN constant used for tag keys and data ids.
        return DOMAIN;
    }

    /**
     * Accepts only records whose business category tag marks them as "search" site.
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        CategoryTag categoryTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag();
        String crawlerSite = categoryTag.getKVTagStrVal("site");
        return SITE.equalsIgnoreCase(crawlerSite);
    }

    /**
     * Registers every URL pattern this script is willing to handle.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(beginUrl);
        addUrlRegular(sortByTimeUrl);
        addUrlRegular(nothingUrl);
        addUrlRegular(sortByTimeNextUrl);
        addUrlRegular(articleUrl);
    }

    /**
     * Expands keyword support-source records into concrete search requests.
     *
     * @param crawlerRequestRecord the seed request carrying the search url template
     * @param supportSourceRecords downloaded support sources; only those whose url
     *                             contains "keys" are treated as keyword pages
     * @return the turn-page requests built from the discovered keywords
     */
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> crawlerRecords = new ArrayList<>();
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (url.contains("keys")) {
                initKeyword(crawlerRequestRecord, supportSourceRecord, crawlerRecords);
            }
        }
        return crawlerRecords;
    }

    /**
     * Reads keywords from a downloaded keyword JSON page and builds one search
     * request per keyword by substituting the URL-encoded keyword into the
     * template found in the seed request's "url" extra.
     */
    private void initKeyword(CrawlerRequestRecord crawlerRequestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = crawlerRequestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get("url");
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        // The keyword service reports success via a top-level "msg" field.
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");

            try {
                String url = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();

                turnRecord.getHttpRequest().addExtra("keyword", keyword);
                crawlerRecords.add(turnRecord);

            } catch (UnsupportedEncodingException e) {
                // Keep the stack trace; plain getMessage() loses the cause.
                log.error("url-encode keyword [{}] failed", keyword, e);
            }
        }
    }

    /**
     * Parses follow-up links from a downloaded page. Failed downloads are
     * re-queued; only time-sorted paged search results produce new links here.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        if (httpPage.getStatusCode() != 200 || (!httpPage.isDownloadSuccess())) {
            log.error("download page url == {} error status is {}", httpPage.getRequest().getUrl(), httpPage.getStatusCode());
            // Download failed: schedule a retry and skip washing this page.
            this.requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }
        String url = crawlerRequestRecord.getHttpRequest().getUrl();
        if (url.matches(sortByTimeNextUrl)) {
            parseSortByTimeLinks(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * Extracts article links (with their release times) from a time-sorted
     * search-result page and, while the newest entry is still within the last
     * week, queues the next result page.
     */
    private void parseSortByTimeLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {

        List<Selectable> nodes = httpPage.getHtml().xpath("//li[@class=\"catlist_li\"]").nodes();
        // Empty result page: nothing to link and no "last entry" to inspect.
        if (nodes.isEmpty()) {
            return;
        }
        for (Selectable node : nodes) {
            String time = node.xpath("./span/text()").get();
            String itemUrl = node.xpath("./a/@href").get();

            try {
                long releaseTime = DateUtils.parseDate(time, LIST_TIME_PATTERN).getTime();
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTime)
                        .resultLabelTag(article)
                        .copyBizTags()
                        .build();

                itemRecord.setNeedWashPage(true);
                // washArticle() later reads this tag back as the release time.
                itemRecord.tagsCreator().bizTags().addCustomKV("time", releaseTime);
                parsedLinks.add(itemRecord);
            } catch (ParseException e) {
                log.error("parse release time [{}] of item [{}] failed", time, itemUrl, e);
            }
        }

        // The page is sorted by time, so the last node carries the oldest entry.
        String lastTimeText = nodes.get(nodes.size() - 1).xpath("./span/text()").get();
        long lastOneTime = 0;
        try {
            lastOneTime = DateUtils.parseDate(lastTimeText, LIST_TIME_PATTERN).getTime();
        } catch (ParseException e) {
            // Unparseable => lastOneTime stays 0 and paging stops here.
            log.error("parse last release time [{}] failed", lastTimeText, e);
        }

        String url = httpPage.getRequest().getUrl();
        if (lastOneTime > System.currentTimeMillis() - ONE_WEEK_MILLIS) {
            // The page number is the value after the final '=' in the url.
            String prefix = url.substring(0, url.lastIndexOf("=") + 1);
            String page = url.substring(url.lastIndexOf("=") + 1);
            String nextPage = prefix + (Integer.parseInt(page) + 1);
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPage)
                    .httpUrl(nextPage)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .build();
            parsedLinks.add(itemRecord);
        }
    }

    /**
     * Washes a downloaded page into crawler data; only article pages produce output.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String url = httpPage.getRequest().getUrl();
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        if (url.matches(articleUrl)) {
            crawlerDataList.add(washArticle(crawlerRequestRecord, httpPage));
        }
        return crawlerDataList;
    }

    /**
     * Extracts title, source, body text and image urls from an article page and
     * packs them into a {@link CrawlerData}. The release time comes from the
     * "time" business tag written in {@link #parseSortByTimeLinks}.
     * <p>
     * NOTE(review): title/info xpaths are assumed to always match on article
     * pages; a missing node would surface as a NullPointerException here —
     * confirm against live pages before hardening.
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {

        String itemUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        // Article key = file name between the last '/' and the ".html" suffix.
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));
        String title = httpPage.getHtml().xpath("//h1[@id=\"title\"]").get();
        String time = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("time");
        String s = httpPage.getHtml().xpath("//div[@class=\"info\"]/text()").get();
        // Everything after the "来源：" ("source:") label is the source name.
        String source = s.substring(s.lastIndexOf("来源：") + 3);
        List<String> texts = httpPage.getHtml().xpath("//div[@id=\"article\"]//text()").all();
        String text1 = httpPage.getHtml().xpath("//div[@class=\"introduce\"]/text()").get();
        StringBuilder contents = new StringBuilder();
        for (String s1 : texts) {
            contents.append(s1);
        }
        String text = contents.toString();
        // Some articles embed layout noise; strip it and prepend the intro text.
        if (text.contains(" ") || text.contains("&#xD;") || text.contains("/n")) {
            text = text.replaceAll("\\n", "");
            text = text.replaceAll(" ", "");
            text = text.replaceAll("&#xD;", "");
            text = text1 + text;
        }

        StringBuilder pictureList = new StringBuilder();
        List<String> pictures = httpPage.getHtml().xpath("//div[@id=\"article\"]//img/@original").all();
        for (String picture : pictures) {
            // "/0X1" acts as the separator/suffix for each image url downstream.
            pictureList.append(picture).append("/0X1");
        }

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.article)
                .url(itemUrl)
                .releaseTime(Long.parseLong(time))
                .addContentKV(AICCommonField.Field_Content, text.trim())
                .addContentKV(AICCommonField.Field_Title, title.trim())
                .addContentKV(AICCommonField.Field_Author, source.trim())
                .addContentKV(AICCommonField.Field_Images, pictureList.toString())
                .build();
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // No post-processing needed for this site.
    }

    /**
     * Re-queues a failed request, incrementing the per-record retry counter
     * kept under {@link #REQUEST_AGAIN_TAG}. Gives up after
     * {@link #MAX_RETRY_COUNT} attempts. Turn-page requests are rebuilt as
     * turn-page requests and item requests as item requests, preserving
     * headers, extras, wash/parse flags and tags.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= MAX_RETRY_COUNT) {
                // Was mislabelled "pcauto" before — this is the foodmate script.
                log.error("{} download page the number of retries exceeds the limit" +
                        ",request url {}", DOMAIN, crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord crawlerRequestRecord = null;
        // Absent "turn_page_item_request" label => the record was a turn-page request.
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }
}
