package com.chance.cc.crawler.development.scripts.baufortune;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.article;
import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.interaction;
import static com.chance.cc.crawler.development.scripts.allfeild.AICCommonField.*;

/**
 * @Author Zhao.Hhuan
 * @Date Create in 2020/11/18 11:19
 * @Description 紫荆财智
 **/
public class BaufortuneCrawlerScript extends CrawlerCommonScript {

    /** Loggers are stateless and shared — one per class, not per instance. */
    private static final Logger log = LoggerFactory.getLogger(BaufortuneCrawlerScript.class);

    /** Sub-sections under the "Info" channel that carry article lists. */
    private static final List<String> modelList =
            Arrays.asList("Achievement", "IPO", "NewEconomy", "News", "Research", "Riches", "Video");

    /**
     * Domain key identifying this script.
     *
     * @return the crawler domain name
     */
    @Override
    public String domain() {
        return "baufortune";
    }

    /**
     * URL patterns routed into this script.
     */
    @Override
    public void initUrlRegulars() {
        addUrlRegular("http://www.baufortune.com");//entry page
        addUrlRegular("http://www.baufortune.com/\\S+");//channel pages
        addUrlRegular("http://www.baufortune.com/Info/\\S*");//sections under "Info"
        addUrlRegular("http://cj.takungpao.com/\\S*/text/\\d{4}/\\d{4}/\\d*.html"); //article detail pages
    }

    /**
     * Pre-execution input check; all records are accepted for this site.
     *
     * @param crawlerRequestRecord the incoming request record
     * @return always {@code true}
     */
    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        return true;
    }


    /**
     * Extracts follow-up request records from the fetched page, dispatching on the
     * URL level: entry page -> channel pages -> section list pages -> article /
     * interaction detail requests.
     *
     * @param crawlerRequestRecord the record that produced this page
     * @param httpPage             the downloaded page
     * @return the parsed follow-up requests (never {@code null})
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String requestUrl = httpPage.getRequest().getUrl();

        if (requestUrl.matches("http://www.baufortune.com")) {
            parseEntryPage(crawlerRequestRecord, httpPage, requestUrl, parsedLinks);
        } else if (requestUrl.matches("http://www.baufortune.com/Info")) {
            parseInfoChannel(crawlerRequestRecord, requestUrl, parsedLinks);
        } else if (requestUrl.matches("http://www.baufortune.com/Info/\\S*|http://www.baufortune.com/Columnist")) {
            parseSectionList(crawlerRequestRecord, httpPage, requestUrl, parsedLinks);
        }
        return parsedLinks;
    }

    /** Entry page: follow the footer links into the Info / Columnist channels. */
    private void parseEntryPage(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                String requestUrl, List<CrawlerRequestRecord> parsedLinks) {
        List<String> modelLinks = httpPage.getHtml().xpath("//div[@id=\"footer\"]/a/@href").all();
        for (String modelLink : modelLinks) {
            // footer hrefs are relative; prepend the entry URL
            modelLink = requestUrl + modelLink;
            if (modelLink.matches("http://www.baufortune.com/(Info|Columnist)")) {
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .httpUrl(modelLink)
                        .recordKey(modelLink)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .build();

                parsedLinks.add(itemRecord);
            }
        }
    }

    /** Info channel: fan out to each known sub-section in {@link #modelList}. */
    private void parseInfoChannel(CrawlerRequestRecord crawlerRequestRecord,
                                  String requestUrl, List<CrawlerRequestRecord> parsedLinks) {
        for (String model : modelList) {
            String modelLink = requestUrl + "/" + model;
            CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .httpUrl(modelLink)
                    .recordKey(modelLink)
                    .releaseTime(System.currentTimeMillis())
                    .notFilterRecord()
                    .build();

            parsedLinks.add(itemRecord);
        }
    }

    /**
     * Section list page: extract article detail URLs (plus, per section, the
     * matching author names) and emit one article record and one interaction
     * (comment-count) record per article.
     */
    private void parseSectionList(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                  String requestUrl, List<CrawlerRequestRecord> parsedLinks) {
        String section = requestUrl.substring(requestUrl.lastIndexOf("/") + 1);

        List<Selectable> nodes = new ArrayList<>();
        List<Selectable> authorList = new ArrayList<>();
        if (section.equals("Columnist")) {
            nodes = httpPage.getHtml().xpath("//div[@class=\"intro js-article\"]/@data-url").nodes();
            authorList = httpPage.getHtml().xpath("//div[@class=\"title\"]/a/text()").nodes();
        } else if (section.equals("Achievement")) {
            nodes = httpPage.getHtml().xpath("//div[@class=\"cont\"]/a/@href").nodes();
        } else if (section.matches("IPO|NewEconomy|News|Research|Riches")) {
            nodes = httpPage.getHtml().xpath("//div[@class=\"title\"]/a/@href").nodes();
            // the "msg" spans mix timestamps and author names; keep only the non-timestamps
            List<Selectable> all = httpPage.getHtml().xpath("//div[@class=\"msg\"]/span/text()").nodes();
            for (Selectable selectable : all) {
                String data = selectable.get();
                if (!data.matches("\\d{2}-\\d{2} \\d{2}:\\d{2}")) {
                    authorList.add(selectable);
                }
            }
        } else if (section.equals("Video")) {
            nodes = httpPage.getHtml().xpath("//div[@class=\"swiper-slide\"]/a/@href|//div[@class=\"eve-playback\"]/a/@href").nodes();
            authorList = httpPage.getHtml().xpath("//span[@class=\"source\"]|//div[@class=\"video-source\"]/a").nodes();
        }

        for (int i = 0; i < nodes.size(); i++) {
            String itemUrl = nodes.get(i).get();
            if (StringUtils.isEmpty(itemUrl)) {
                continue;
            }
            // article URLs embed the publish date as yyyy/MMdd
            String releaseTime = getString("\\d{4}/\\d{4}", itemUrl);
            if (StringUtils.isBlank(releaseTime)) {
                continue;
            }

            try {
                Long releaseTimeToLong = DateUtils.parseDate(releaseTime, "yyyy/MMdd").getTime();

                // article detail request
                CrawlerRequestRecord itemRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTimeToLong)
                        .needParsed(false)
                        .resultLabelTag(article)
                        .build();

                // guard on i: the author list may be shorter than the link list,
                // so index pairing must not run past its end
                if (i < authorList.size()) {
                    String author;
                    if (section.equals("Video")) {
                        author = authorList.get(i).xpath("./text()").get();
                    } else {
                        author = authorList.get(i).get();
                    }
                    author = StringUtils.isNotEmpty(author) ? author : "";
                    Map<String, Object> extras = new HashMap<>();
                    extras.put("author", author);
                    itemRecord.getHttpRequest().setExtras(extras);
                }

                parsedLinks.add(itemRecord);

                // interaction (comment count) request, keyed by the article id
                String interactionUrl = "http://www.baufortune.com/m/comment/appShow?rnd=" + itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));
                CrawlerRequestRecord interactionRecord = CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(interactionUrl)
                        .httpUrl(interactionUrl)
                        .releaseTime(releaseTimeToLong)
                        .needParsed(false)
                        .resultLabelTag(interaction)
                        .build();

                parsedLinks.add(interactionRecord);
            } catch (ParseException e) {
                log.error("Failed to parse release time '{}' from url {}", releaseTime, itemUrl, e);
            }
        }
    }

    /**
     * Washes the downloaded page into result data according to its result tag.
     * Failed article washes return {@code null} and are skipped rather than
     * added to the result list.
     *
     * @param crawlerRecord the originating request record
     * @param page          the downloaded page
     * @return washed data items (possibly empty, never containing {@code null})
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();
        if (crawlerResultTags.hasDataType(article)) {
            CrawlerData articleData = washArticle(crawlerRecord, page);
            if (articleData != null) {
                crawlerDataList.add(articleData);
            }
        }

        if (crawlerResultTags.hasDataType(interaction)) {
            CrawlerData interactionData = washInteraction(crawlerRecord, page);
            if (interactionData != null) {
                crawlerDataList.add(interactionData);
            }
        }

        return crawlerDataList;
    }

    /**
     * Washes an article detail page (hosted on cj.takungpao.com) into a
     * {@link CrawlerData}: content text, title, author and release time.
     *
     * @return the washed article, or {@code null} when the release time cannot be parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        int statusCode = httpPage.getStatusCode();
        try {
            List<String> articleTextList = httpPage.getHtml().xpath("//div[@class=\"tkp_content\"]//text()").all();

            StringBuilder contents = new StringBuilder();
            for (String articleText : articleTextList) {
                contents.append(StringEscapeUtils.unescapeHtml4(articleText)).append(" ");
            }

            // the author block mixes a timestamp span and an author span
            String releaseTimeStr = "";
            String author = "";
            List<String> timeAndAuthor = httpPage.getHtml().xpath("//div[@class=\"tkp_con_author\"]/span/text()").all();
            for (String data : timeAndAuthor) {
                if (data.matches("\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}")) {
                    releaseTimeStr = data;
                } else {
                    author = data;
                }
            }

            // prefer the author captured on the list page, but never clobber the
            // page-parsed author with a missing/null extras value
            Map<String, Object> extras = httpPage.getRequest().getExtras();
            if (extras != null && extras.get("author") != null) {
                author = (String) extras.get("author");
            }

            String itemUrl = httpPage.getRequest().getUrl();
            String articleKey = itemUrl.substring(itemUrl.lastIndexOf("/") + 1, itemUrl.lastIndexOf("."));

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTimeStr, "yyyy-MM-dd HH:mm:ss").getTime())
                    .addContentKV(Field_Content, contents.toString().trim())
                    .addContentKV(Field_Title, StringEscapeUtils.unescapeHtml4(httpPage.getHtml().xpath("//h2[@class=\"tkp_con_title\"]").get()))
                    .addContentKV(Field_Author, author)
                    .build();
        } catch (ParseException e) {
            log.error("Failed to wash article page '{}', status code: {}", httpPage.getRequest().getUrl(), statusCode, e);
        }
        return null;
    }

    /**
     * Washes a comment-count (interaction) page; the article key is the value of
     * the {@code rnd} query parameter, linking the record back to its article.
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String itemUrl = httpPage.getRequest().getUrl();
        String articleKey = itemUrl.substring(itemUrl.lastIndexOf("=") + 1);

        return CrawlerData.builder()
                .data(crawlerRequestRecord, httpPage)
                .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                .resultLabelTag(interaction)
                .url(itemUrl)
                .releaseTime(crawlerRequestRecord.getReleaseTime())
                .addContentKV(Field_I_Comments, httpPage.getHtml().xpath("//b[@id=\"comment_count\"]").get())
                .build();
    }


    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // no post-processing required for this site
    }

    /**
     * Returns the first substring of {@code input} matching {@code regx},
     * or {@code null} when there is no match.
     *
     * @param regx  the regular expression to search for
     * @param input the string to search in
     * @return the first match, or {@code null}
     */
    private static String getString(String regx, String input) {
        Matcher matcher = Pattern.compile(regx).matcher(input);
        return matcher.find() ? matcher.group(0) : null;
    }

}
