package com.gxljc.bear.crawler.itaogao.infzm;

import com.gxljc.commons.util.Pair;
import com.gxljc.bear.crawler.base.DolphinCrawlerConsts;
import com.gxljc.bear.crawler.base.DolphinFetchData;
import com.gxljc.bear.crawler.image.ImageFresh;
import com.gxljc.bear.crawler.itaogao.ItaogaoConst;
import com.gxljc.bear.crawler.itaogao.NewsPageTable;
import com.gxljc.bear.crawler.itaogao.util.MongodbUtil;
import com.gxljc.bear.crawler.proxy.ProxyUtil;
import com.gxljc.bear.crawler.util.HtmlUtil;
import com.gxljc.bear.crawler.util.MD5Util;
import com.gxljc.bear.crawler.util.SparkUtil;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.*;

/**
 * 南方周末 update 数据爬虫。
 *
 * @author tanghaitao
 * @since 2022-9-21
 */
public class InfzmUpdateCrawler implements Serializable {
    public static final Logger LOG = Logger.getLogger(InfzmUpdateCrawler.class);
    // Whether HTTP fetches should go through the proxy pool.
    private Boolean proxy = false;

    public InfzmUpdateCrawler(Boolean proxy) {
        this.proxy = proxy;
    }

    /**
     * Entry point: builds the seed URL list and crawls every seed on the
     * local JVM (no Spark). Logs and returns early when no seeds exist.
     *
     * @throws Exception if seed construction fails
     */
    public void crawl() throws Exception {
        List<String> seeds = getSeed();
        if (CollectionUtils.isEmpty(seeds)) {
            LOG.error("page is empty");
            return;
        }
        crawlNoSpark(seeds);
    }

    /**
     * Crawls the given seed URLs sequentially in this JVM. A failure on one
     * URL is logged (with its stack trace) and does not stop the loop.
     *
     * @param seeds listing-page URLs to fetch
     */
    public void crawlNoSpark(List<String> seeds) {
        ProxyUtil.initQueue(proxy);
        for (String url : seeds) {
            try {
                crawl(url);
            } catch (Exception e) {
                // Log through the class logger with the cause attached
                // instead of printStackTrace() to stderr.
                LOG.error("crawl failed, url=" + url, e);
            }
        }
    }

    /**
     * Crawls the seed URLs on a Spark cluster, distributing the seed list
     * across partitions. Per-URL failures are logged and skipped; the job
     * logs a running success count every 100 successful fetches.
     *
     * @param seeds listing-page URLs to fetch
     */
    public void crawl(List<String> seeds) {
        int coreMax = 10;
        JavaSparkContext jsc = SparkUtil.createCommonsSparkContext(
                "bear- 南方周末 种子爬取-" + seeds.size(), coreMax,
                coreMax * 2, InfzmUpdateCrawler.class);
        JavaRDD<String> seedsRDD = jsc.parallelize(new ArrayList<String>(seeds));
        long count = seedsRDD.mapPartitions(
                new FlatMapFunction<Iterator<String>, Integer>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Iterable<Integer> call(Iterator<String> seeds)
                            throws Exception {
                        int successCnt = 0;
                        while (seeds.hasNext()) {
                            try {
                                String seed = seeds.next();
                                int ret = crawl(seed);
                                // Progress is logged only after an actual
                                // success; the old standalone check also
                                // fired on every failure while the count
                                // was still 0.
                                if (ret == DolphinCrawlerConsts.RESULT_YES) {
                                    successCnt++;
                                    if (successCnt % 100 == 0) {
                                        LOG.info("successCnt=" + successCnt);
                                    }
                                }
                            } catch (Exception er) {
                                LOG.error("crawl seed failed", er);
                            }
                        }
                        return Arrays.asList(successCnt);
                    }
                }).count();
        LOG.info("all count=" + count);
        jsc.stop();
    }

    /**
     * Crawls a single listing-page URL: fetches the HTML, extracts article
     * rows and persists them to MongoDB.
     *
     * @param url listing-page URL
     * @return {@code DolphinCrawlerConsts.RESULT_YES} when the page was
     *         fetched and processed, {@code RESULT_NO} when the fetch
     *         returned nothing
     * @throws Exception on fetch or extraction errors
     */
    public int crawl(String url) throws Exception {
        LOG.info("crawl url = " + url);
        DolphinFetchData fetchData = DolphinFetchData.getInstance(DolphinCrawlerConsts.CrawlerChannelType.bear.getName());
        byte[] htmlByte = fetchData.getHtml(url, proxy, ItaogaoConst.CRAWL_TIMEOUT);
        if (htmlByte == null) {
            LOG.error("page result is empty url = " + url);
            return DolphinCrawlerConsts.RESULT_NO;
        }
        // Explicit charset constant: no checked UnsupportedEncodingException
        // and no dependency on the platform default.
        String html = new String(htmlByte, StandardCharsets.UTF_8);
        if (StringUtils.isEmpty(html)) {
            LOG.error("page result is empty url = " + url);
            return DolphinCrawlerConsts.RESULT_NO;
        }
        List<NewsPageTable> tables = extract(html, url);
        // extract() yields an empty list when nothing matched; skip the save
        // rather than hand a useless (previously possibly null) list to Mongo.
        if (CollectionUtils.isEmpty(tables)) {
            LOG.warn("no list rows extracted, url = " + url);
            return DolphinCrawlerConsts.RESULT_YES;
        }
        MongodbUtil.saveMongodb(tables, ItaogaoConst.MONGODB_TABLE.INFZM.getValue());
        return DolphinCrawlerConsts.RESULT_YES;
    }

    /**
     * Resolves and refreshes a thumbnail image URL.
     *
     * @param crawlUrl page URL used to absolutize a relative image URL
     * @param logoUrl  raw image URL from the listing markup; may be empty
     * @return refreshed image URL, or {@code null} when {@code logoUrl} is empty
     * @throws Exception if the image refresh fails
     */
    private String parseImage(String crawlUrl, String logoUrl) throws Exception {
        if (StringUtils.isEmpty(logoUrl)) return null;
        ImageFresh fresh = ImageFresh.getInstance();
        String newUrl = HtmlUtil.pictureUrlPref(logoUrl, crawlUrl);
        // NOTE(review): assumes freshOne never returns null — confirm against
        // ImageFresh; otherwise .second would NPE here.
        Pair<Integer, String> newPicPair = fresh.freshOne(newUrl);
        return newPicPair.second;
    }

    /**
     * Extracts article rows (link + thumbnail) from a listing page.
     *
     * @param html listing-page HTML
     * @param url  page URL, used to absolutize relative links
     * @return extracted rows; empty when no {@code .listContent} element matched
     * @throws Exception if image refresh fails for a row
     */
    private List<NewsPageTable> extract(String html, String url) throws Exception {
        Document doc = Jsoup.parse(html);
        Elements elements = doc.select(".listContent");
        // Empty list instead of null so callers can iterate/save safely.
        if (CollectionUtils.isEmpty(elements)) return Collections.emptyList();
        List<NewsPageTable> tables = new LinkedList<>();
        for (Element element : elements) {
            // NOTE(review): selector "h a" is unusual (perhaps "h2 a" was
            // intended) — kept as-is; verify against the live page markup.
            String crawlUrl = element.select("h a").attr("href");
            String logoUrl = element.select(".picture a img").attr("src");
            NewsPageTable table = new NewsPageTable();
            crawlUrl = HtmlUtil.pictureUrlPref(crawlUrl, url);
            table.setId(genId(crawlUrl));
            table.setCrawlUrl(crawlUrl);
            String newLogoUrl = parseImage(crawlUrl, logoUrl);
            table.setLogoUrl(newLogoUrl);
            tables.add(table);
        }
        return tables;
    }

    /**
     * Generates a stable row id from a link: the MD5 of the URL.
     */
    private String genId(String href) {
        return MD5Util.getMd5(href);
    }

    /**
     * Builds the seed URL list: one page-numbered URL per channel, pages
     * {@code 0..max-1} per channel (currently max = 1 for every channel).
     *
     * @return formatted listing-page URLs
     */
    public List<String> getSeed() throws Exception {
        // LinkedHashMap keeps the declared channel order, so the crawl order
        // is deterministic (HashMap iteration order was arbitrary).
        Map<String, Integer> seeds = new LinkedHashMap<String, Integer>();
        seeds.put("http://www.infzm.com/contents/13/%s", 1);
        seeds.put("http://www.infzm.com/contents/12/%s", 1);
        seeds.put("http://www.infzm.com/contents/11/%s", 1);
        seeds.put("http://www.infzm.com/contents/10/%s", 1);
        seeds.put("http://www.infzm.com/contents/8/%s", 1);
        seeds.put("http://www.infzm.com/contents/7/%s", 1);
        seeds.put("http://www.infzm.com/contents/6/%s", 1);
        seeds.put("http://www.infzm.com/contents/2594/%s", 1);
        seeds.put("http://www.infzm.com/contents/2556/%s", 1);
        seeds.put("http://www.infzm.com/contents/2554/%s", 1);
        seeds.put("http://www.infzm.com/contents/2611/%s", 1);
        seeds.put("http://www.infzm.com/contents/1374/%s", 1);
        List<String> urls = new LinkedList<String>();
        // Typed entries: no raw Map.Entry, no casts.
        for (Map.Entry<String, Integer> seed : seeds.entrySet()) {
            int max = seed.getValue();
            String urlFormat = seed.getKey();
            for (int i = 0; i < max; i++) {
                String url = String.format(urlFormat, i + "");
                urls.add(url);
            }
        }
        return urls;
    }

}
