package com.chance.cc.crawler.development.scripts.foodmate;

import com.chance.cc.crawler.core.CrawlerEnum;
import com.chance.cc.crawler.core.CrawlerRecordContext;
import com.chance.cc.crawler.core.downloader.HttpPage;
import com.chance.cc.crawler.core.pipeline.result.CrawlerData;
import com.chance.cc.crawler.core.record.CrawlerRequestRecord;
import com.chance.cc.crawler.core.script.crawler.CrawlerCommonScript;
import com.chance.cc.crawler.core.selector.Selectable;
import com.chance.cc.crawler.core.tags.crawler.CrawlerResultTags;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.chance.cc.crawler.core.CrawlerEnum.CrawlerDataType.*;

/**
 * Crawler script for news.foodmate.net.
 *
 * <p>Flow: the site entrance page links to module (category) pages; each module
 * page lists articles and links to the next list page; each list item yields an
 * article-detail request and a comment-endpoint request. {@link #washPage}
 * turns fetched pages into {@link CrawlerData} records (currently articles only;
 * interaction/comment washing is unfinished — see the TODO in washPage).
 */
public class FoodmateCrawlerScript extends CrawlerCommonScript {

    // SLF4J logger: static final per convention (one instance per class).
    private static final Logger log = LoggerFactory.getLogger(FoodmateCrawlerScript.class);

    /** Regex matching any foodmate.net host, e.g. "http://news.foodmate.net". */
    private static final String FOODMATE_PREFIX = "http://\\S*.foodmate.net";
    /** Site entrance page (also a literal URL, not only a regex). */
    private static final String ENTRANCE_URL = "http://news.foodmate.net/";
    /** First page of a module/category listing. */
    private static final String MODULE_ENTRANCE_URL = "http://\\S*.foodmate.net/\\S*/";
    /** Paged module listing, e.g. ".../list_12.html". */
    private static final String NEXT_MODULE_ARTICLE_URL = "http://\\S*.foodmate.net/\\S*/list_\\d+.html";
    /** Article detail page, e.g. ".../2021/5/123456.html". */
    private static final String ARTICLE_URL = "http://\\S*.foodmate.net/\\d{4}/\\d+/\\d+.html";
    /** Comment endpoint, keyed by the numeric article item id. */
    private static final String ARTICLE_COMMENT_URL =
            "http://\\S*.foodmate.net/comment/index.php\\?mid=21&itemid=\\d+";

    /** Release-time format used on both list items and article detail pages. */
    private static final String RELEASE_TIME_FORMAT = "yyyy-MM-dd HH:mm";
    /** Prefix used to build a comment-endpoint URL from an article item id. */
    private static final String COMMENT_URL_PREFIX = "http://news.foodmate.net/comment/index.php?mid=21&itemid=";

    @Override
    public String domain() {
        return "foodmate";
    }

    @Override
    public boolean crawlerCheck(CrawlerRequestRecord crawlerRequestRecord) {
        // No per-record filtering for this site: everything that matched a URL
        // regular is accepted.
        return true;
    }

    @Override
    public void initUrlRegulars() {
        addUrlRegular(ENTRANCE_URL);
        addUrlRegular(MODULE_ENTRANCE_URL);
        addUrlRegular(NEXT_MODULE_ARTICLE_URL);
        addUrlRegular(ARTICLE_URL);
        addUrlRegular(ARTICLE_COMMENT_URL);
    }

    /**
     * Extracts follow-up requests from a fetched page.
     *
     * <ul>
     *   <li>Entrance page: emits one turn-page request per module link.</li>
     *   <li>Module / list page: emits the "next page" request (if any) plus, for
     *       each list item, an article request and a comment request, both
     *       stamped with the item's release time.</li>
     * </ul>
     *
     * @param crawlerRequestRecord the request that produced {@code httpPage}
     * @param httpPage             the downloaded page
     * @return parsed follow-up requests; never null
     */
    @Override
    public List<CrawlerRequestRecord> parseLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        List<CrawlerRequestRecord> parsedLinks = new ArrayList<>();

        String requestUrl = httpPage.getRequest().getUrl();
        if (requestUrl.matches(ENTRANCE_URL)) {
            parseEntranceLinks(crawlerRequestRecord, httpPage, requestUrl, parsedLinks);
        } else if (requestUrl.matches(MODULE_ENTRANCE_URL) || requestUrl.matches(NEXT_MODULE_ARTICLE_URL)) {
            parseNextPageLink(crawlerRequestRecord, httpPage, parsedLinks);
            parseListItems(crawlerRequestRecord, httpPage, parsedLinks);
        }

        return parsedLinks;
    }

    /** Collects module-entrance links from the site entrance page. */
    private void parseEntranceLinks(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                    String requestUrl, List<CrawlerRequestRecord> parsedLinks) {
        List<String> pageList = httpPage.getHtml().xpath("//div[@class=\"m\"]/ul/li/a/@href").all();
        for (String listUrl : pageList) {
            // Relative hrefs are made absolute by prepending the page's host.
            if (!listUrl.matches(MODULE_ENTRANCE_URL)) {
                String host = getString(FOODMATE_PREFIX, requestUrl);
                if (host == null) {
                    // Cannot resolve a relative link without a host; skip it
                    // (previously this concatenated the literal "null" and the
                    // resulting URL silently failed the match below anyway).
                    continue;
                }
                listUrl = host + listUrl;
            }
            if (listUrl.matches(MODULE_ENTRANCE_URL)) {
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .turnPageRequest(crawlerRequestRecord)
                        .recordKey(listUrl)
                        .httpUrl(listUrl)
                        .releaseTime(System.currentTimeMillis())
                        .notFilterRecord()
                        .build());
            }
        }
    }

    /** Emits a turn-page request for the "next page" link of a list page, if present. */
    private void parseNextPageLink(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                   List<CrawlerRequestRecord> parsedLinks) {
        String nextPageUrl = "";
        for (Selectable selectable : httpPage.getHtml().xpath("//div[@class=\"pages\"]/a").nodes()) {
            String text = selectable.xpath("./text()").get();
            // "下一页" is the site's literal "next page" anchor text.
            if (text.contains("下一页")) {
                nextPageUrl = selectable.xpath("./@href").get();
                break;
            }
        }
        if (StringUtils.isNotEmpty(nextPageUrl)) {
            parsedLinks.add(CrawlerRequestRecord.builder()
                    .turnPageRequest(crawlerRequestRecord)
                    .recordKey(nextPageUrl)
                    .httpUrl(nextPageUrl)
                    .releaseTime(System.currentTimeMillis())
                    .build());
        }
    }

    /** Emits an article request and a comment request for every item on a list page. */
    private void parseListItems(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage,
                                List<CrawlerRequestRecord> parsedLinks) {
        for (Selectable node : httpPage.getHtml().xpath("//li[@class=\"catlist_li\"]").nodes()) {
            String itemUrl = node.xpath("./a/@href").get();
            if (StringUtils.isBlank(itemUrl)) {
                continue;
            }

            String pubTime = node.xpath(".//span/text()|.//span/a/text()").get();
            if (StringUtils.isBlank(pubTime)) {
                // DateUtils.parseDate(null, ...) throws IllegalArgumentException,
                // which would abort the whole loop; skip the malformed item instead.
                log.warn("Missing publish time for list item {}, skipping", itemUrl);
                continue;
            }

            try {
                long releaseTime = DateUtils.parseDate(pubTime, RELEASE_TIME_FORMAT).getTime();

                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(itemUrl)
                        .httpUrl(itemUrl)
                        .releaseTime(releaseTime)
                        .resultLabelTag(article)
                        .needParsed(false)
                        .build());

                // Derive the comment-endpoint URL from the article's numeric id
                // (the file-name part of the article URL).
                String commentUrl = COMMENT_URL_PREFIX + extractArticleKey(itemUrl);
                parsedLinks.add(CrawlerRequestRecord.builder()
                        .itemPageRequest(crawlerRequestRecord)
                        .recordKey(commentUrl)
                        .httpUrl(commentUrl)
                        .releaseTime(releaseTime)
                        .resultLabelTag(comment)
                        .resultLabelTag(interaction)
                        .needParsed(false)
                        .build());
            } catch (ParseException e) {
                // Log with context instead of printStackTrace(); a single bad
                // date must not stop the rest of the list.
                log.error("Failed to parse publish time '{}' for item {}", pubTime, itemUrl, e);
            }
        }
    }

    /**
     * Washes a fetched page into crawler data records according to the tags the
     * request was labeled with.
     *
     * @param crawlerRecord the request that produced {@code page}
     * @param page          the downloaded page
     * @return washed data records (currently article data only); never null
     */
    @Override
    public List<CrawlerData> washPage(CrawlerRequestRecord crawlerRecord, HttpPage page) {
        List<CrawlerData> crawlerDataList = new ArrayList<>();
        CrawlerResultTags crawlerResultTags = crawlerRecord.tagsCreator().resultTags();
        if (crawlerResultTags.hasDataType(article)) {
            crawlerDataList.add(washArticle(crawlerRecord, page));
        }
        // TODO: interaction counts and comments are not finished yet. Once they
        // are, dispatch here on hasDataType(interaction) -> washInteraction and
        // hasDataType(comment) -> washComment over the
        // //div[@class="comment comment_sp"] nodes of the comment page.
        return crawlerDataList;
    }

    /**
     * Washes an article detail page: title, author, source, release time and the
     * concatenated introduce + content text.
     *
     * @return the article {@link CrawlerData}, or null if the release time
     *         cannot be parsed
     */
    public CrawlerData washArticle(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        try {
            String title = httpPage.getHtml().xpath("//h1[@id=\"title\"]/text()").get();
            // The info line packs author / source / time as space-separated
            // "label：value" segments (full-width colon).
            String data = httpPage.getHtml().xpath("//div[@class=\"info\"]/text()").get();
            String author = "";
            String source = "";
            String releaseTime = "";
            for (String segment : data.split(" ")) {
                if (StringUtils.isNotEmpty(segment)) {
                    if (segment.contains("作者")) { // "author"
                        author = segment.substring(segment.lastIndexOf("：") + 1);
                    }
                    if (segment.contains("来源")) { // "source"
                        source = segment.substring(segment.lastIndexOf("：") + 1);
                    }
                    if (segment.contains("时间")) { // "time"
                        releaseTime = getString("\\d{4}-\\d+-\\d+ \\d+:\\d+", segment);
                    }
                }
            }

            // Body text = introduce block followed by the main content block.
            // StringBuilder (not StringBuffer): no concurrent access here.
            StringBuilder contents = new StringBuilder();
            for (String s : httpPage.getHtml().xpath("//div[@class=\"introduce\"]//text()").all()) {
                contents.append(s).append(" ");
            }
            for (String text : httpPage.getHtml().xpath("//div[@class=\"content\"]//text()").all()) {
                contents.append(text).append(" ");
            }

            String itemUrl = httpPage.getRequest().getUrl();
            String articleKey = extractArticleKey(itemUrl);

            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    // Use the statically imported enum constant directly instead
                    // of the stringly-typed valueOf("article") lookup.
                    .resultLabelTag(article)
                    .url(itemUrl)
                    .releaseTime(DateUtils.parseDate(releaseTime, RELEASE_TIME_FORMAT).getTime())
                    .addContentKV("content", contents.toString().trim())
                    .addContentKV("title", title)
                    .addContentKV("source", source)
                    .addContentKV("author", author)
                    .build();
        } catch (ParseException e) {
            log.error("Failed to wash article {}: {}", httpPage.getRequest().getUrl(), e.getMessage(), e);
        }
        return null;
    }

    /**
     * Washes the page-view ("views") counter of an article into an interaction
     * record.
     *
     * @return the interaction {@link CrawlerData}, or null when no view counter
     *         is present on the page
     */
    public CrawlerData washInteraction(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage) {
        String requestUrl = httpPage.getRequest().getUrl();
        String articleKey = extractArticleKey(requestUrl);
        String views = httpPage.getHtml().xpath("//div[@class=\"sjz\"]/a/text()").get();
        if (StringUtils.isNotBlank(views)) {
            // Counter text is "label：N" (full-width colon); keep the number part.
            views = views.substring(views.lastIndexOf("：") + 1);
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                    .url(requestUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV("views", views)
                    .build();
        }
        return null;
    }

    /**
     * Washes a single comment node of an article's comment page.
     *
     * <p>TODO(review): this is an unfinished copy of {@link #washInteraction} —
     * it ignores the {@code node} parameter and emits the same
     * {@code interaction} dataId and "views" payload. Behavior is intentionally
     * preserved here; the comment extraction still needs to be implemented
     * (see the TODO in {@link #washPage}).
     *
     * @param node the comment node to wash (currently unused)
     */
    public CrawlerData washComment(CrawlerRequestRecord crawlerRequestRecord, HttpPage httpPage, Selectable node) {
        String requestUrl = httpPage.getRequest().getUrl();
        String articleKey = extractArticleKey(requestUrl);
        String views = httpPage.getHtml().xpath("//div[@class=\"sjz\"]/a/text()").get();
        if (StringUtils.isNotBlank(views)) {
            views = views.substring(views.lastIndexOf("：") + 1);
            return CrawlerData.builder()
                    .data(crawlerRequestRecord, httpPage)
                    .parentId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), article.enumVal(), articleKey))
                    .dataId(StringUtils.joinWith("-", crawlerRequestRecord.getDomain(), interaction.enumVal(), articleKey))
                    .url(requestUrl)
                    .releaseTime(crawlerRequestRecord.getReleaseTime())
                    .addContentKV("views", views)
                    .build();
        }
        return null;
    }

    @Override
    public void afterExecute(CrawlerRecordContext crawlerRecordContext) {
        // Nothing to clean up for this site.
    }

    /**
     * Extracts the article key — the file-name portion of a URL such as
     * "http://news.foodmate.net/2021/5/123456.html" → "123456".
     * Article URLs are guaranteed by {@link #initUrlRegulars()} to end in
     * "/name.ext", so the index arithmetic is safe for all callers.
     */
    private static String extractArticleKey(String url) {
        return url.substring(url.lastIndexOf("/") + 1, url.lastIndexOf("."));
    }

    /**
     * Returns the first match of {@code regex} in {@code input}, or null when
     * nothing matches. (The original "while { return }" loop could only ever
     * run once; an if expresses the same thing honestly.)
     */
    private static String getString(String regex, String input) {
        Matcher matcher = Pattern.compile(regex).matcher(input);
        if (matcher.find()) {
            return matcher.group(0);
        }
        return null;
    }
}
