package com.chance.cc.crawler.development.scripts.flightclub;

import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.KVTag;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.development.scripts.allfeild.AICCommonField;
import com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;

/**
 * Crawler script for flightclub.cn (sneaker news).
 *
 * <p>Flow: list pages ({@code /sneaker/search/...}) are parsed into article pages
 * ({@code /news/a/sneaker/...html}) plus their interaction-count AJAX endpoints
 * ({@code /ajax/news_count/<id>}); article pages are washed into {@code article}
 * data, the AJAX endpoints into {@code interaction} data keyed to the parent article.
 */
public class FlightclubCrawlerScript extends CrawlerCommonScript {

    // static so the static articleRequest(...) helper can log too
    private static final Logger log = LoggerFactory.getLogger(FlightclubCrawlerScript.class);

    private static final String DOMAIN = "flightclub";
    private static final String SITE = "shoes";
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    // maximum number of re-download attempts before a URL is abandoned
    private static final int MAX_RETRIES = 10;

    // Tokenizer for keyword strings: anything that is not a digit, an ASCII letter
    // or a CJK character acts as a separator. Compiled once (Pattern is thread-safe).
    // FIX: the original class was [^0-9a-zA-z...]; "A-z" also matched the punctuation
    // range between 'Z' and 'a' ("[\]^_`"), which let brackets leak through and had
    // to be stripped in a second pass.
    private static final Pattern KEYWORD_SEPARATORS = Pattern.compile("[^0-9a-zA-Z\\u4e00-\\u9fa5]");

    // List pages, e.g. https://www.flightclub.cn/sneaker/search/nike/40
    private static final String ArticleListRegulars = "https://www.flightclub.cn/sneaker/search/\\S*";
    // Article pages, e.g. https://www.flightclub.cn/news/a/sneaker/2015/1021/27438.html
    private static final String ArticleRegulars = "https://www.flightclub.cn/news/a/sneaker/\\d*/\\d*/\\S*.html";
    // Interaction-count endpoint, e.g. https://www.flightclub.cn/ajax/news_count/27338
    private static final String ArticleInteractionRegulars = "https://www.flightclub.cn/ajax/news_count/\\S*";

    /** Script domain: identifies this concrete script. */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /** URL patterns that route requests into this script. */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(ArticleListRegulars);
        addUrlRegular(ArticleRegulars);
        addUrlRegular(ArticleInteractionRegulars);
    }

    /**
     * Input gate: only records whose "site" category tag equals {@value #SITE}
     * are processed by this script.
     */
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String site = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        return SITE.equals(site);
    }

    /**
     * Pre-request hook. Extracts keyword configuration from support-source records
     * whose URL contains "keys" and stores it on {@code crawlerRequestRecord} as a
     * business tag. Intentionally returns an empty list: no extra links are produced here.
     */
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord crawlerRequestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> prepareLinks = new ArrayList<>();
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (url.contains("keys")) {
                this.initKeyWord(crawlerRequestRecord, supportSourceRecord);
            }
        }
        return prepareLinks;
    }

    /**
     * Parses the keyword support page and stores a city -> brand-id-list map under
     * the "cityAndBrandId" business tag of {@code crawlerRequestRecord}.
     *
     * <p>Each "keyword" entry is split on non-alphanumeric/non-CJK characters; the
     * first token is treated as the city, the remaining tokens as brand ids.
     */
    private void initKeyWord(CrawlerRequestRecord crawlerRequestRecord, CrawlerRequestRecord supportSourceRecord) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get(); // "success" marks a good payload
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }
        try {
            Map<String, List<String>> cityAndBrandIdMap = new HashMap<>();
            List<String> contentEntries = json.jsonPath($_type + ".content").all();
            for (String data : contentEntries) {
                JSONObject jsonObject = JSONObject.parseObject(data);
                String keyword = jsonObject.getString("keyword");
                String[] tokens = KEYWORD_SEPARATORS.matcher(keyword).replaceAll(" ")
                        .replaceAll("\\s+", " ")
                        .trim()
                        .split(" ");
                String city = tokens[0];
                // FIX: the original reused (and clear()ed) a single list across
                // iterations, so every map value aliased the same list and only
                // the last keyword's brands survived. Allocate a fresh list per entry.
                List<String> brandIdList = new ArrayList<>();
                for (int i = 1; i < tokens.length; i++) {
                    // FIX: original used "tokens[i] != \"\"" (reference identity),
                    // which is effectively always true for split() results.
                    if (!tokens[i].isEmpty()) {
                        brandIdList.add(tokens[i]);
                    }
                }
                cityAndBrandIdMap.put(city, brandIdList);
            }
            crawlerRequestRecord.tagsCreator().bizTags().addCustomKV("cityAndBrandId", cityAndBrandIdMap);
        } catch (Exception e) {
            // FIX: was e.printStackTrace(); log with context and cause instead
            log.error("failed to parse keyword page [{}]", internalDownloadPage.getRequest().getUrl(), e);
        }
    }

    /**
     * Link-parsing entry point. Retries failed downloads (except 404s) and parses
     * list pages into article / interaction requests.
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();
        String requestUrl = crawlerRequestRecord.getHttpRequest().getUrl();
        if (httpPage.getStatusCode() != 200 || (!httpPage.isDownloadSuccess())) {
            // FIX: first placeholder was the broken literal "{null}", which dropped the URL argument
            log.error("download page url [{}] error, status is {}", requestUrl, httpPage.getStatusCode());
            if (httpPage.getStatusCode() != 404) { // anything but a hard 404 is retried
                this.requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
                return parsedLinks;
            }
        }

        if (requestUrl.matches(ArticleListRegulars)) { // list page -> article + interaction links
            articleRequest(crawlerRequestRecord, httpPage, parsedLinks);
        }
        return parsedLinks;
    }

    /**
     * NOTE(review): appears to be leftover template code — it builds autohome.com.cn
     * URLs and is never called from this script. Kept for compatibility; confirm
     * before deleting.
     */
    private void homeRequest(CrawlerRequestRecord crawlerRecord, HttpPage page, List<CrawlerRequestRecord> parsedLinks) {
        KVTag keys = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTag("keys");
        List<String> name = (List<String>) keys.getVal();
        for (int i = 0; i < name.size(); i++) {
            String url = "https://www.autohome.com.cn/" + name.get(i);
            CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(url)
                    .releaseTime(System.currentTimeMillis()) // FIX: dropped redundant Long.valueOf boxing
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            parsedLinks.add(record);
        }
    }

    /**
     * Parses a list page: queues the "next page" link (if any), then one article
     * request plus one interaction-count request per article on the page.
     */
    public static void articleRequest(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // pagination: follow the "下一页" (next page) anchor when present
        String u = httpPage.getHtml().xpath("//a[./text()=\"下一页\"]/@href").get();
        if (u != null) {
            String nextPageUrl = "https://www.flightclub.cn" + u;
            CrawlerRequestRecord turnPageRequest = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .copyBizTags()
                    .copyResultTags()
                    .needParsed(true)
                    .build();
            parsedLinks.add(turnPageRequest);
        }

        List<Selectable> nodes = httpPage.getHtml().xpath("//div[@class=\"left_info pure-u-14-24\"]").nodes(); // article entries
        for (Selectable node : nodes) {
            String articleUrl = "https://www.flightclub.cn" + node.xpath(".//a/@href").get();
            if (StringUtils.isBlank(articleUrl)) {
                continue;
            }
            // FIX: original stored the literal string "articleUrl" instead of the
            // variable, which broke id derivation in washArticleInteraction.
            crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(AICCommonField.Field_Urls, articleUrl);
            String pubTime = node.xpath(".//div[@class=\"senddate\"]/text()").get(); // publish date, "yyyy-MM-dd"
            String articleInteractionUrl = "https://www.flightclub.cn/ajax/news_count/" + extractArticleId(articleUrl);

            long releaseTime;
            try {
                releaseTime = DateUtils.parseDate(pubTime, "yyyy-MM-dd").getTime();
            } catch (ParseException e) {
                // FIX: was e.printStackTrace(); skip this article like the original did on failure
                log.error("unable to parse publish date [{}] for article {}", pubTime, articleUrl, e);
                continue;
            }

            CrawlerRequestRecord articleRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(articleUrl)
                    .httpUrl(articleUrl)
                    .releaseTime(releaseTime)
                    .copyBizTags()
                    .copyResultTags()
                    .needWashed(true)
                    .build();
            parsedLinks.add(articleRecord);

            CrawlerRequestRecord articleInteractionRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .recordKey(articleInteractionUrl)
                    .httpUrl(articleInteractionUrl)
                    .releaseTime(releaseTime)
                    .copyBizTags()
                    .copyResultTags()
                    .needWashed(true)
                    .build();
            parsedLinks.add(articleInteractionRecord);
        }
    }

    /**
     * Extracts the numeric article id (the last path segment before ".html")
     * from an article or interaction URL.
     */
    private static String extractArticleId(String articleUrl) {
        // FIX: original used replaceAll(".html", ...) where the unescaped '.' is a regex any-char
        String[] segments = articleUrl.replaceAll("\\.html", "/").split("/");
        return segments[segments.length - 1];
    }

    /** Routes washed pages to the article or interaction washer by URL pattern and result tag. */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        String url = crawlerRecord.getHttpRequest().getUrl();
        if (url.matches(ArticleRegulars) && crawlerRecord.tagsCreator().resultTags().hasDataType(article)) {
            crawlerDataList.addAll(washArticle(crawlerRecord, page));
        }
        if (url.matches(ArticleInteractionRegulars) && crawlerRecord.tagsCreator().resultTags().hasDataType(interaction)) {
            crawlerDataList.addAll(washArticleInteraction(crawlerRecord, page));
        }
        return crawlerDataList;
    }

    /**
     * Washes an interaction-count response (plain view count in the body) into an
     * {@code interaction} record parented to the corresponding article.
     */
    public List<CrawlerData> washArticleInteraction(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        log.info("washing article interaction counts");
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        // article URL stashed on the biz tags by articleRequest(...)
        String articleUrl = crawlerRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal(AICCommonField.Field_Urls);
        String views = page.getRawText().trim();
        String articleId = extractArticleId(articleUrl);
        CrawlerData crawlerData = CrawlerData.builder()
                .data(crawlerRecord, page)
                .dataId(StringUtils.joinWith("-", domain(), interaction, articleId))
                .parentId(StringUtils.joinWith("-", domain(), article, articleId))
                .resultLabelTag(interaction)
                .url(articleUrl)
                .releaseTime(System.currentTimeMillis())
                .addContentKV(AICCommonField.Field_I_Views, views)
                .flowInPipelineTag("kafka")
                .build();
        crawlerData.setFilterPipelineResult(true);
        crawlerArticleDataList.add(crawlerData);
        return crawlerArticleDataList;
    }

    /**
     * Washes an article page into an {@code article} record: title, publish time,
     * tags, body text and image URLs.
     */
    public List<CrawlerData> washArticle(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        log.info("washing article content");
        List<CrawlerData> crawlerArticleDataList = new ArrayList<>();
        try {
            String articleUrl = page.getRequest().getUrl();
            String title = page.getHtml().xpath("//h1/text()").get();
            String releaseTime = page.getHtml().xpath("//div[@class=\"news_info left pure-u-1 pure-u-md-4-24\"]/div/div/text()").get();
            String pubTime = releaseTime.replaceAll("发布时间", "").trim(); // strip the "publish time" label
            List<String> tag = page.getHtml().xpath("//div[@class=\"tag\"]//text()").all();
            List<String> texts = page.getHtml().xpath("//div[@class=\"content\"]//text()").all();
            List<String> picture = page.getHtml().xpath("//div[@class=\"content\"]//img/@src").all();
            // FIX: StringBuilder instead of StringBuffer — no shared mutation here
            StringBuilder contents = new StringBuilder();
            for (String text : texts) {
                contents.append(text).append(" ");
            }
            StringBuilder pictures = new StringBuilder();
            for (String text : picture) {
                // NOTE(review): "\0x1" is NUL + "x1" — possibly meant "\u0001"; kept
                // byte-identical to preserve the downstream record format. Confirm.
                pictures.append(text).append("\0x1");
            }
            StringBuilder tags = new StringBuilder();
            for (String text : tag) {
                tags.append(text).append(" ");
            }
            CrawlerData crawlerData = CrawlerData.builder()
                    .data(crawlerRecord, page)
                    .dataId(StringUtils.joinWith("-", crawlerRecord.getDomain(), article.enumVal(), extractArticleId(articleUrl)))
                    .resultLabelTag(article)
                    .url(articleUrl)
                    .releaseTime(DateUtils.parseDate(pubTime, "yyyy-MM-dd").getTime())
                    // strip entity carriage returns, newlines and injected adsense snippets
                    .addContentKV(AICCommonField.Field_Content, contents.toString().trim().replaceAll("&#xD;", "").replaceAll("\\n", "").replaceAll("\\(adsbygoogle = window.adsbygoogle \\|\\| \\[\\]\\).push\\(\\{\\}\\);", ""))
                    .addContentKV(AICCommonField.Field_Title, title)
                    .addContentKV(AICCommonField.Field_Images, pictures.toString().trim())
                    .addContentKV(AutoVMCommonField.Tag_Field_Topic_Type, tags.toString().trim())
                    .flowInPipelineTag("kafka")
                    .build();
            crawlerData.setFilterPipelineResult(true);
            crawlerArticleDataList.add(crawlerData);
        } catch (Exception e) {
            log.error(e.getMessage(), e);
        }
        return crawlerArticleDataList;
    }

    /** Post-execution hook — intentionally a no-op for this script. */
    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
    }

    /**
     * Re-queues a failed download, tracking the attempt count in the
     * {@value #REQUEST_AGAIN_TAG} business tag and giving up after
     * {@value #MAX_RETRIES} attempts. Item-page vs turn-page request type is
     * preserved from the original record.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= MAX_RETRIES) {
                // FIX: message said "pcauto" — copy-paste leftover from another script
                log.error("{} download page: the number of retries exceeds the limit, request url {}",
                        DOMAIN, crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        CrawlerRequestRecord crawlerRequestRecord;
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count) // vary key so the retry is not deduplicated
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        // carry over wash/parse flags, headers and extras from the failed request
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

}