/*
 * TOP SECRET Copyright 2006-2015 Transsion.com All right reserved. This software is the confidential and proprietary
 * information of Transsion.com ("Confidential Information"). You shall not disclose such Confidential Information and
 * shall use it only in accordance with the terms of the license agreement you entered into with Transsion.com.
 */
package com.yunji.framework_template.biz.crawler;

import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;

import org.jsoup.nodes.Element;
import org.jsoup.parser.Tag;
import org.jsoup.select.Elements;
import org.springframework.beans.factory.annotation.Autowired;

import com.yunji.framework_template.biz.cache.CacheService;
import com.yunji.framework_template.biz.cache.EhDataCache;
import com.yunji.framework_template.biz.service.NewsCrawledService;
import com.yunji.framework_template.biz.service.NewsService;
import com.yunji.framework_template.biz.service.NewsSourcesService;
import com.yunji.framework_template.common.enumeration.NewsType;
import com.yunji.framework_template.common.http.HttpClientUtil;
import com.yunji.framework_template.common.util.MD5Util;
import com.yunji.framework_template.common.util.StringUtil;
import com.yunji.framework_template.common.web.HttpResult;
import com.yunji.framework_template.orm.datasource.HandleDataSource;
import com.yunji.framework_template.orm.persistence.model.News;
import com.yunji.framework_template.orm.persistence.model.NewsCrawled;
import com.yunji.framework_template.orm.persistence.vo.ImageVO;
import com.yunji.framework_template.orm.persistence.vo.NewsVo;
import lombok.extern.slf4j.Slf4j;

/**
 * ClassName:NewsCrawler 新闻爬虫<br/>
 * 注：实现的子类放在与当前类相同的包中，系统将自动加载所有的实现类并定时调用<br>
 * Date: 2018年11月26日 下午7:38:30 <br/>
 * 
 * @author fenglibin1982@163.com
 * @Blog http://blog.csdn.net/fenglibing
 * @version
 * @see
 */
@Slf4j
public abstract class NewsCrawler extends Crawler {

    @Autowired
    protected NewsService            newsService;
    @Autowired
    protected NewsCrawledService     newsCrawledService;
    @Autowired
    protected NewsSourcesService     newsSourcesService;
    @Autowired
    protected CacheService           cacheService;
    @Autowired
    protected EhDataCache            ehDataCache;
    /**
     * 用于判断当前实例是否在运行中
     */
    private boolean                  isRunning       = false;

    /**
     * 容器启动的时候会将所有的爬虫加入
     */
    private static List<NewsCrawler> newsCrawlerList = new ArrayList<NewsCrawler>();

    /**
     * 判断页面中的新闻URL是否是合适的URL，这个是通用的判断方法，具体的每个类中抓取逻辑还可以实现自己的URL判断方法
     * 
     * @param url
     * @return
     */
    public abstract boolean isOkUrl(String url);

    /**
     * 用于指定当前新闻类型，通过具体新闻内容页，获取其中的图片的处理实现
     * 
     * @return
     */
    public abstract ContentImageHandler getContentImageHandler();

    /**
     * 批量获取待爬取资源的类型，如果实现了该方法则表示其为批量抓取实现
     * 
     * @return
     */
    public abstract List<SourceType> getSourceTypeList();

    /**
     * 返回当前新闻所属的国家，用于在多国家多数据库的情况下将数据写到指定的国家对应的数据库中
     * 
     * @return
     */
    public abstract Set<String> getCountryCodeSet();

    /**
     * 新闻爬取的实现，抓取单个新闻源
     * 
     * @param sourceType
     * @return
     */
    public Map<String, NewsVo> crawleNews(SourceType sourceType) {
        Map<String, NewsVo> newsMap = new HashMap<String, NewsVo>();
        try {
            if (sourceType == null) {
                return newsMap;
            }
            HttpResult result = HttpClientUtil.doGet(sourceType.getUrl());
            if (result == null || result.getCode() != 200 || StringUtil.isEmpty(result.getContent())) {
                return newsMap;
            }
            Element contentElement = new Element(Tag.valueOf("contents"), sourceType.getUrl());
            contentElement.html(result.getContent());
            Elements urlEles = contentElement.select("a");
            for (Element element : urlEles) {
                String newsUrl = element.absUrl("href");
                UrlValidator urlValidator = sourceType.getUrlValidator();
                if (urlValidator != null) {
                    if (!urlValidator.isOkUrl(newsUrl)) {
                        continue;
                    }
                } else {
                    if (!isOkUrl(newsUrl)) {
                        continue;
                    }
                }
                /**
                 * 提取链接中的标题
                 */
                String title = null;
                if (sourceType.getTitleHandler() != null) {
                    title = sourceType.getTitleHandler().getTitle(element);
                } else {
                    title = defaultTitleHandler.getTitle(element);
                }
                if (title.length() < 10) {//小于10个字符，先放弃，可以避免一些特殊情况
                    continue;
                }
                String img = null;
                /**
                 * 提取链接中的所有图片
                 */
                img = getAllImagesFromHrefElement(element);
                if (StringUtil.isEmpty(img)) {// 再从内容中提取图片
                    ContentImageHandler contentImageHandler = null;
                    if (sourceType.getContentImageHandler() != null) {
                        contentImageHandler = sourceType.getContentImageHandler();
                    } else if (this.getContentImageHandler() != null) {
                        contentImageHandler = this.getContentImageHandler();
                    }
                    if (contentImageHandler != null) {
                        img = contentImageHandler.getImage(newsUrl);
                    }
                }

                NewsVo news = NewsVo.builder().url(newsUrl).title(title).img(img).build();
                NewsVo existsNewsVo = newsMap.get(newsUrl);
                if (existsNewsVo != null) {
                    if (StringUtil.isEmpty(existsNewsVo.getImg()) && !StringUtil.isEmpty(news.getImg())) {
                        existsNewsVo.setImg(news.getImg());
                    }
                    if (StringUtil.isEmpty(existsNewsVo.getTitle()) && !StringUtil.isEmpty(news.getTitle())) {
                        existsNewsVo.setTitle(news.getTitle());
                    }
                    newsMap.put(newsUrl, existsNewsVo);
                } else {
                    newsMap.put(newsUrl, news);
                }
            }
        } catch (Exception e) {
            log.error(this.getClass().getName() + ":" + e.getMessage(), e);
        }
        // 将title和image同时为空的过滤掉，标题太短的也去掉，被检测到为乱码的标题也去掉
        return newsMap.entrySet().stream().filter((e) -> {
            return (e.getValue().getTitle() != null || e.getValue().getImg() != null)
                   && !StringUtil.isMessyCode(e.getValue().getTitle());
        }).collect(Collectors.toMap((e) -> (String) e.getKey(), (e) -> e.getValue()));
    }

    /**
     * 调用新闻爬虫实现抓取新闻，然后再保存新闻。<br>
     * 可以针对个资源的新闻进行处理：通过实现方法getNewsSourceUrl() && getNewsType() <br>
     * 也可以针对多个新闻资源进行处理：通过实现方法getSourceTypeList() <br>
     * 或者同时处理：通过实现方法getNewsSourceUrl() && getNewsType() && getSourceTypeList() <br>
     */
    public void saveNews() {
        if (isRunning) {
            log.warn("当前新闻爬虫正在执行中，国家码：" + HandleDataSource.getDataSource() + "," + this.getClass().getName());
            return;
        }
        try {
            isRunning = true;
            // 对资源进行批量爬取
            List<SourceType> sourceTypeList = getSourceTypeList();
            if (sourceTypeList != null && sourceTypeList.size() > 0) {
                sourceTypeList.forEach((s) -> {
                    log.info("(1)开始对URL:" + s.getUrl() + "的资源进行抓取.");
                    Map<String, NewsVo> newsMap = crawleNews(s);
                    log.info("(1)抓取到:" + newsMap.size() + "资源准备入库.");
                    // HandleDataSource.putDataSource(this.getCurrentCountryCode());
                    int result = saveNews(newsMap, s.getNewsType());
                    log.info("(1)成功入库:" + result + "条资源.");
                    if (result > 0 && cacheService.getCountryCache().isCrawleSubPages()) {
                        // 多抓一层
                        for (Map.Entry<String, NewsVo> entry : newsMap.entrySet()) {
                            SourceType sourceType = SourceType.builder().url(entry.getValue().getUrl()).newsType(s.getNewsType()).contentImageHandler(s.getContentImageHandler()).titleHandler(s.getTitleHandler()).urlValidator(s.getUrlValidator()).build();
                            log.info("(2)开始对URL:" + sourceType.getUrl() + "的资源进行抓取.");
                            newsMap = crawleNews(sourceType);
                            log.info("(2)抓取到:" + newsMap.size() + "资源准备入库.");
                            // HandleDataSource.putDataSource(this.getCurrentCountryCode());
                            result = saveNews(newsMap, sourceType.getNewsType());
                            log.info("(2)成功入库:" + result + "条资源.");
                        }
                    }
                });
            }
        } finally {
            isRunning = false;
        }
    }

    /**
     * 将爬取到的资源进行入库操作
     * 
     * @param newsMap
     * @return 当前资源是否入库成功
     */
    private int saveNews(Map<String, NewsVo> newsMap, NewsType newsType) {
        int addNum = 0;
        if (newsMap == null || newsMap.size() == 0) {
            return addNum;
        }
        for (Map.Entry<String, NewsVo> entry : newsMap.entrySet()) {
            try {
                NewsVo newsVo = entry.getValue();
                String urlMd5 = MD5Util.md5Hex(newsVo.getUrl());
                /**
                 * 判断ehcache是否已经存在了
                 */
                Object obj = ehDataCache.get(urlMd5);
                if (obj != null) {// 已经存在了
                    continue;
                }

                NewsCrawled newsCrawled = newsCrawledService.selectByPrimaryKey(urlMd5);
                if (newsCrawled != null) {
                    addMd5Cache(urlMd5);
                    continue;
                }

                News news = new News();
                news.setUrlMd5(urlMd5);
                news.setNewsType(newsType.name());
                List<News> newsList = newsService.selectByCondition(news);
                if (newsList != null && newsList.size() > 0) {// 相同的URL已经存在
                    addMd5Cache(urlMd5, newsList.get(0));
                    continue;
                }

                news.setTitle(newsVo.getTitle());
                news.setImg(newsVo.getImg());
                news.setAdded(new Date());
                news.setUrl(newsVo.getUrl());
                news.setUrlMd5(urlMd5);

                List<ImageVO> imageVOList = getNewsImageList(news);
                if (checkImageMd5Exists(imageVOList)) {
                    continue;
                }

                addImageWidthHeight(news, imageVOList);
                /**
                 * 入库
                 */
                newsService.insert(news);

                newsCrawled = new NewsCrawled();
                newsCrawled.setNid(news.getId());
                newsCrawled.setUrlMd5(urlMd5);
                newsCrawled.setAdded(new Date());
                if (imageVOList != null && imageVOList.size() > 0) {
                    newsCrawled.setImgMd5(imageVOList.get(0).getImgMd5());
                }
                newsCrawledService.insert(newsCrawled);
                // 增加爬取的总记录数
                cacheService.getCache().getCrawledNews().incrementAndGet();

                addMd5Cache(urlMd5, news);
                addNum++;
            } catch (Exception e) {
                // 忽略掉主键冲突的问题
                /*
                 * if (!(e instanceof MySQLIntegrityConstraintViolationException)) {
                 * log.error("Save news exception happened:" + e.getMessage(), e); }
                 */
                log.error("Save news exception happened:" + e.getMessage(), e);
            }
        }

        return addNum;

    }

    /**
     * 获取所有实现的新闻爬虫
     * 
     * @return
     */
    public static List<NewsCrawler> getNewsCrawlerList() {
        return newsCrawlerList;
    }

}
