package com.chance.cc.crawler.development.scripts.wangyiyun;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRecord;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Json;
import com.chance.cc.crawler.core.tags.LabelTag;
import com.chance.cc.crawler.core.tags.crawler.CrawlerBusinessTags;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.Field_Author;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.Field_Content;
import static com.chance.cc.crawler.development.scripts.allfeild.AutoVMCommonField.Field_Author_Follows;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2021/3/22 13:38
 * @Description
 *      NetEase Cloud Music (网易云) crawler: searches by keyword, walks playlist and
 *      song-detail APIs, and washes playlist/song JSON plus author home pages.
 **/
public class WangYiYunCrawlerScript extends CrawlerCommonScript {

    private static final Logger log = LoggerFactory.getLogger(WangYiYunCrawlerScript.class);

    /** Crawler domain identifier for this script. */
    private static final String DOMAIN = "wangyiyun";
    /** "site" category-tag value that routes records into this script. */
    private static final String SITE = "searchKw";
    /** Biz tag counting how many times a failed download has been re-queued. */
    private static final String REQUEST_AGAIN_TAG = DOMAIN + "_request_again";
    /** Extras key holding the keyword-search URL template (one %s for the keyword). */
    private static final String SEARCH_KW_SOURCE_URL = "searchKwSourceUrl";
    /** Give up re-queuing a failed download after this many attempts. */
    private static final int MAX_RETRY_COUNT = 5;

    private static final String WEB_URL = "http://music.163.com/api/search/pc\\S*";
    private static final String PLAY_LIST_SOURCE_URL = "http://music.163.com/api/v3/playlist/detail?updateTime=-1&id=%s";
    private static final String PLAY_LIST_URL = "http://music.163.com/api/v3/playlist/detail\\S*";
    private static final String SONG_SOURCE_URL = "http://music.163.com/api/song/detail/?ids=%s&id=%s";
    private static final String SONG_URL = "http://music.163.com/api/song/detail/\\S*";
    private static final String AUTHOR_HOME_SOURCE_URL = "https://music.163.com/user/home?id=%s";
    private static final String AUTHOR_HOME_URL = "https://music.163.com/user/home\\S*";

    /**
     * Session cookie attached to author-home requests. Was previously duplicated at
     * two call sites; kept in one place so it can be refreshed with a single edit.
     * NOTE(review): this is a captured session token and will eventually expire —
     * consider moving it to configuration.
     */
    private static final String AUTHOR_HOME_COOKIE = "_ntes_nnid=6462bd936ffad1ce004cbcb726413d17,1612771118713; _ntes_nuid=6462bd936ffad1ce004cbcb726413d17; vinfo_n_f_l_n3=9d78fbf6ddfe9705.1.0.1612771118722.0.1612771135426; _iuqxldmzr_=32; NMTID=00O0r26vb9k45YsSkOzhpAZm_uVjxUAAAF3gQ2ucw; WM_TID=N47%2BQZ4eKe9BAEVEQUduf8FSUfOLKktD; WEVNSM=1.0.0; WNMCID=aoqopd.1616379559051.01.0; WM_NI=FAAIJWuF2%2BCrlhiZ24SZoouCTKuF9yOllFJ1xgsWBL4HQS%2BE2K9V1d8jWH5uZFEAhjPHW0HSFL4Jq3rESlIXqqfjL5k44rw1TBGFE4kiNuPI%2FdvTmwhZP9jUKHMrDATCNGk%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee87f94d949288b4d56af4ef8eb6c84b829a9fafb6619c92bf9bee62ab9faab6f92af0fea7c3b92a869f8faac95db79afeccc27ab0b8a7bbb1409186a190f8499895bba7bc738db4a4a7c93bbcb9bdb8c148f8989fb4aa4ff295f7b7b470fcb699a9b87f9a9a8ba2eb62949899d1cb598694fcabf439b29b9d8dcc54b3afaa8bcf3fa9ecfc93f63b989e89a7c840a9b78296d343edbcbab5f07dac86a0dae95f89988393c74b989d82a8ea37e2a3; __csrf=89e53028a67935b9c4b8d45010c54901; MUSIC_U=b1b8bd6291933032932c61d83fd4b70c200b4e0980eb2e34dbc05fa697edcb7933d8bdbc89b8e7e5928e3bf6bc25cdfa217475eb0b9aeecae381395bf06ec255; ntes_kaola_ad=1; JSESSIONID-WYYY=YCUzq1AQd2y5fXyPJR9Gc9w27voukf%2BPd%2BNFzgFp856RSjccdYFe9FYO83R%5CGh0q9Hoe3oWQnmcl1djgT5fgzqCOXeA2GxoZ2TAYNkEFkArJkr%2FPQsEOaz5uZjJOiKmCWhXxq0Rf2F3%2F%2FEu19uH3%2BFCJ2c0%2Bstvj4N6OvwZg6ge0fUUl%3A1616400849567";

    /**
     * Script domain definition.
     *
     * @return the domain identifier for this script
     */
    @Override
    public String domain() {
        return DOMAIN;
    }

    /**
     * URL regular expressions that route pages into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular(WEB_URL);
        addUrlRegular(PLAY_LIST_URL);
        addUrlRegular(SONG_URL);
        addUrlRegular(AUTHOR_HOME_URL);
    }

    /**
     * Input check: only records whose "site" category tag equals {@link #SITE} enter
     * this script. Null-safe: a record without the tag is rejected rather than
     * triggering a NullPointerException (the original compared in the other direction).
     *
     * @param crawlerRequestRecord the incoming request record
     * @return true if the record should be processed by this script
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        String siteTag = crawlerRequestRecord.tagsCreator().bizTags().getCategoryTag().getKVTagStrVal("site");
        return SITE.equals(siteTag);
    }

    /**
     * Builds the initial search requests from the keyword support-source records.
     * Only support records whose URL contains "keys" carry keyword pages.
     */
    @Override
    public List<CrawlerRecord> prepareRequest(CrawlerRequestRecord requestRecord, List<CrawlerRequestRecord> supportSourceRecords) {
        List<CrawlerRecord> crawlerRecords = new ArrayList<>();
        for (CrawlerRequestRecord supportSourceRecord : supportSourceRecords) {
            String url = supportSourceRecord.getHttpRequest().getUrl();
            if (url.contains("keys")) {
                initKeyword(requestRecord, supportSourceRecord, crawlerRecords);
            }
        }
        return crawlerRecords;
    }

    /**
     * Link-parsing entry point. Failed downloads are re-queued via
     * {@link #requestAgainCrawlerRecord}; otherwise the page is dispatched to the
     * handler matching its URL pattern (search result / playlist / song detail).
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return follow-up requests extracted from the page
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        if (!httpPage.isDownloadSuccess()) {
            log.error(DOMAIN + " page download error!will retry");
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
            return parsedLinks;
        }

        // The three patterns are mutually exclusive, so at most one handler runs.
        String requestUrl = httpPage.getRequest().getUrl();
        if (requestUrl.matches(WEB_URL)) {
            webUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(PLAY_LIST_URL)) {
            playListUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        } else if (requestUrl.matches(SONG_URL)) {
            songUrlRecord(crawlerRequestRecord, httpPage, parsedLinks);
        }

        return parsedLinks;
    }

    /**
     * Handles a keyword-search result page: emits the next-page request (offset
     * advanced by the current limit) and one playlist-detail request per playlist.
     */
    private void webUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        // Turn page: rebuild the query string with offset advanced by limit.
        String requestUrl = httpPage.getRequest().getUrl();
        String[] split = requestUrl.split("\\?");
        if (split.length < 2) {
            // Original code would throw ArrayIndexOutOfBoundsException here.
            log.error("search url [{}] has no query string, cannot turn page", requestUrl);
            return;
        }
        StringBuilder nextUrl = new StringBuilder(split[0]).append('?');
        List<NameValuePair> parse = URLEncodedUtils.parse(split[1], Charset.defaultCharset());
        int offset = 0;
        int limit = 0;
        for (NameValuePair nameValuePair : parse) {
            String name = nameValuePair.getName();
            String value = nameValuePair.getValue();
            if ("offset".equals(name)) {
                offset = Integer.parseInt(value);
            } else {
                try {
                    nextUrl.append(name).append('=').append(URLEncoder.encode(value, "UTF-8")).append('&');
                } catch (UnsupportedEncodingException e) {
                    // UTF-8 is guaranteed by the JVM spec; log just in case.
                    log.error("failed to encode query param [" + name + "]", e);
                }
                if ("limit".equals(name)) {
                    limit = Integer.parseInt(value);
                }
            }
        }
        nextUrl.append("offset=").append(offset + limit);

        // Only keep paging while the API reports remaining playlists.
        String playlistCount = httpPage.getJson().jsonPath($_type + ".result.playlistCount").get();
        if (StringUtils.isNotBlank(playlistCount) && Integer.parseInt(playlistCount) > 0) {
            CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(nextUrl.toString())
                    .releaseTime(System.currentTimeMillis())
                    .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                    .copyBizTags()
                    .needWashed(true)
                    .copyResultTags()
                    .build();
            parsedLinks.add(turnRecord);
        }

        // One playlist-detail request per playlist in this result page.
        List<String> all = httpPage.getJson().jsonPath($_type + ".result.playlists").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String id = jsonObject.getString("id");
            String url = String.format(PLAY_LIST_SOURCE_URL, id);
            CrawlerRequestRecord requestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRequestRecord)
                    .httpUrl(url)
                    .releaseTime(System.currentTimeMillis())
                    .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                    .copyBizTags()
                    .copyResultTags()
                    .build();
            requestRecord.tagsCreator().bizTags().addSiteBiz("song_list");
            parsedLinks.add(requestRecord);
        }
    }

    /**
     * Handles a playlist-detail page: emits an author-home request for the playlist
     * creator plus one song-detail request per track. Any parse failure re-queues
     * the page for download.
     */
    private void playListUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        try {
            // Playlist creator -> author home page.
            String userId = httpPage.getJson().jsonPath($_type + ".playlist.creator.userId").get();
            if (StringUtils.isNotBlank(userId)) {
                parsedLinks.add(buildAuthorRecord(crawlerRequestRecord, userId));
            }

            // One song-detail request per track id in the playlist.
            List<String> all = httpPage.getJson().jsonPath($_type + ".playlist.trackIds").all();
            for (String data : all) {
                JSONObject jsonObject = JSONObject.parseObject(data);
                String id = jsonObject.getString("id");
                String url = String.format(SONG_SOURCE_URL, URLEncoder.encode("[" + id + "]", "UTF-8"), id);
                CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .httpHeads(crawlerRequestRecord.getHttpRequest().getHeaders())
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                record.tagsCreator().bizTags().addSiteBiz("song_detail");
                parsedLinks.add(record);
            }
        } catch (Exception e) {
            // Preserve the stack trace; the original logged only e.getMessage().
            log.error("parse playlist page [" + httpPage.getRequest().getUrl() + "] failed", e);
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
        }
    }

    /**
     * Handles a song-detail page: emits one author-home request per artist.
     * Any parse failure re-queues the page for download.
     */
    private void songUrlRecord(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, List<CrawlerRequestRecord> parsedLinks) {
        try {
            JSONArray artists = JSONObject.parseObject(httpPage.getJson().jsonPath($_type + ".songs").get()).getJSONArray("artists");
            for (Object s : artists) {
                JSONObject jsonObject = (JSONObject) s;
                parsedLinks.add(buildAuthorRecord(crawlerRequestRecord, jsonObject.getString("id")));
            }
        } catch (Exception e) {
            log.error("parse song page [" + httpPage.getRequest().getUrl() + "] failed", e);
            requestAgainCrawlerRecord(parsedLinks, crawlerRequestRecord);
            crawlerRequestRecord.setNeedWashPage(false);
        }
    }

    /**
     * Builds an author-home item request for the given user id, attaching the shared
     * session cookie and the "author" site-biz tag. Factored out of the two call
     * sites that previously duplicated the cookie literal.
     */
    private CrawlerRequestRecord buildAuthorRecord(CrawlerRequestRecord parent, String userId) {
        CrawlerRequestRecord record = CrawlerRequestRecord.builder()
                .itemPageRequest(parent)
                .httpUrl(String.format(AUTHOR_HOME_SOURCE_URL, userId))
                .releaseTime(System.currentTimeMillis())
                .copyBizTags()
                .copyResultTags()
                .build();
        record.getHttpRequest().addHeader("Cookie", AUTHOR_HOME_COOKIE);
        record.tagsCreator().bizTags().addSiteBiz("author");
        return record;
    }

    /**
     * Wash entry point: JSON API pages are stored raw, author home pages are
     * parsed from HTML.
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();
        String url = page.getRequest().getUrl();

        if (crawlerResultTags.hasDataType(article)) {
            if (url.matches(WEB_URL) || url.matches(PLAY_LIST_URL) || url.matches(SONG_URL)) {
                crawlerDataList.add(washJsonArticle(crawlerRecord, page));
            } else if (url.matches(AUTHOR_HOME_URL)) {
                crawlerDataList.add(washArticle(crawlerRecord, page));
            }
        }
        return crawlerDataList;
    }

    /**
     * Washes an author home page (HTML): extracts author name, follower count and
     * profile description. The article key is the trailing id query value.
     */
    private CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("=") + 1);

        String author = httpPage.getHtml().xpath("//span[@class=\"tit f-ff2 s-fc0 f-thide\"]").get();
        String follows = httpPage.getHtml().xpath("//strong[@id=\"fan_count\"]/text()").get();
        String content = httpPage.getHtml().xpath("//div[@class=\"inf s-fc3 f-brk\"]").get();

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                .url(itemUrl)
                .releaseTime(System.currentTimeMillis())
                .addContentKV(Field_Content, content)
                .addContentKV(Field_Author, author)
                .addContentKV(Field_Author_Follows, follows)
                .build();
    }

    /**
     * Washes a JSON API page by storing its re-serialized body as the content.
     * The article key is the trailing query value of the request URL.
     */
    private CrawlerData washJsonArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("=") + 1);

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), CrawlerEnum.CrawlerDataType.article.enumVal(), articleKey))
                .resultLabelTag(CrawlerEnum.CrawlerDataType.valueOf("article"))
                .url(itemUrl)
                .releaseTime(System.currentTimeMillis())
                .content(JSONObject.parseObject(httpPage.getRawText()).toJSONString())
                .build();
    }

    /**
     * Normalizes a Chinese count string: "3.5万" -> "35000". Blank input yields "0";
     * anything else is returned unchanged. Not referenced within this script —
     * presumably kept for future wash fields; confirm before removing.
     */
    private static String washNum(String text) {
        if (StringUtils.isBlank(text)) {
            return "0";
        }

        text = text.toLowerCase();
        if (text.contains("万")) {
            String[] split = text.split("万");
            String num = String.valueOf(Double.parseDouble(split[0].trim()) * 10000);
            // Drop the decimal part produced by the double multiplication.
            return num.split("\\.")[0];
        }

        return text;
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {

    }

    /**
     * Re-queues a failed request, tracking the attempt count in a biz tag and
     * giving up after {@link #MAX_RETRY_COUNT} tries. Turn-page and item requests
     * are rebuilt with their original request kind.
     */
    private void requestAgainCrawlerRecord(List<CrawlerRequestRecord> crawlerRequestRecords, CrawlerRequestRecord crawlerRecord) {
        CrawlerBusinessTags crawlerBusinessTags = crawlerRecord.tagsCreator().bizTags();
        int count = 0;
        if (crawlerBusinessTags.hasKVTag(REQUEST_AGAIN_TAG)) {
            count = (int) crawlerBusinessTags.getCategoryTag().getKVTag(REQUEST_AGAIN_TAG).getVal();
            if (count >= MAX_RETRY_COUNT) {
                log.error(DOMAIN + " download page the number of retries exceeds the limit" +
                        ",request url {}", crawlerRecord.getHttpRequest().getUrl());
                return;
            }
        }

        String requestUrl = crawlerRecord.getHttpRequest().getUrl();
        LabelTag type = crawlerRecord.tagsCreator().requestTags().getCategoryTag().getLabelTags().get("turn_page_item_request");
        CrawlerRequestRecord crawlerRequestRecord;
        if (type == null) {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .notFilterRecord()
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        } else {
            crawlerRequestRecord = CrawlerRequestRecord.builder()
                    .itemPageRequest(crawlerRecord)
                    .httpUrl(requestUrl)
                    .releaseTime(crawlerRecord.getReleaseTime())
                    .recordKey(crawlerRecord.getRecordKey() + count)
                    .copyBizTags()
                    .copyResultTags()
                    .build();
        }

        crawlerRequestRecord.getHttpRequest().setExtras(crawlerRecord.getHttpRequest().getExtras());
        crawlerRequestRecord.getHttpRequest().setHeaders(crawlerRecord.getHttpRequest().getHeaders());
        crawlerRequestRecord.setNeedWashPage(crawlerRecord.isNeedWashPage());
        crawlerRequestRecord.setNeedParsedPage(crawlerRecord.isNeedParsedPage());
        crawlerRequestRecord.tagsCreator().bizTags().addCustomKV(REQUEST_AGAIN_TAG, ++count);
        crawlerRequestRecords.add(crawlerRequestRecord);
    }

    /**
     * Expands a keyword support page into one search request per keyword, using the
     * URL template supplied via the {@link #SEARCH_KW_SOURCE_URL} extra.
     */
    private void initKeyword(CrawlerRequestRecord requestRecord, CrawlerRequestRecord supportSourceRecord, List<CrawlerRecord> crawlerRecords) {
        HttpPage internalDownloadPage = supportSourceRecord.getInternalDownloadPage();
        Map<String, Object> extras = requestRecord.getHttpRequest().getExtras();
        String searchSourceUrl = (String) extras.get(SEARCH_KW_SOURCE_URL);
        if (StringUtils.isBlank(searchSourceUrl)) {
            log.error("search kw source url can not null!");
            return;
        }
        Json json = internalDownloadPage.getJson();
        String msg = json.jsonPath($_type + ".msg").get();
        if (!"success".equals(msg)) {
            log.error("keyword page [{}] download error!", internalDownloadPage.getRequest().getUrl());
            return;
        }

        List<String> all = json.jsonPath($_type + ".content").all();
        for (String data : all) {
            JSONObject jsonObject = JSONObject.parseObject(data);
            String keyword = jsonObject.getString("keyword");

            try {
                String url = String.format(searchSourceUrl, URLEncoder.encode(keyword, "UTF-8"));
                CrawlerRequestRecord turnRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(requestRecord)
                        .httpUrl(url)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .copyBizTags()
                        .copyResultTags()
                        .build();
                turnRecord.tagsCreator().bizTags().addKeywords(keyword);
                crawlerRecords.add(turnRecord);
            } catch (UnsupportedEncodingException e) {
                log.error("failed to encode keyword [" + keyword + "]", e);
            }
        }
    }
}
